# resource.py — Neo4j-backed data-resource graph operations
  1. import json
  2. import re
  3. import logging
  4. from py2neo import Relationship
  5. import pandas as pd
  6. from app.services.neo4j_driver import neo4j_driver
  7. from app.core.graph.graph_operations import create_or_get_node, relationship_exists, get_node, connect_graph
  8. import time
  9. from datetime import datetime
  10. from app.core.meta_data import get_formatted_time, translate_and_parse
  11. from app import db
  12. from sqlalchemy import text
  13. logger = logging.getLogger("app")
  14. def serialize_neo4j_object(obj):
  15. """
  16. 将Neo4j对象转换为可JSON序列化的格式
  17. Args:
  18. obj: Neo4j节点或属性值
  19. Returns:
  20. 序列化后的对象
  21. """
  22. if hasattr(obj, 'year'): # DateTime对象
  23. # 将Neo4j DateTime转换为字符串
  24. return obj.strftime("%Y-%m-%d %H:%M:%S") if hasattr(obj, 'strftime') else str(obj)
  25. elif hasattr(obj, '__dict__'): # 复杂对象
  26. return str(obj)
  27. else:
  28. return obj
  29. def serialize_node_properties(node):
  30. """
  31. 将Neo4j节点属性序列化为可JSON化的字典
  32. Args:
  33. node: Neo4j节点对象
  34. Returns:
  35. dict: 序列化后的属性字典
  36. """
  37. properties = {}
  38. for key, value in dict(node).items():
  39. properties[key] = serialize_neo4j_object(value)
  40. return properties
  41. def get_formatted_time():
  42. """获取格式化的当前时间"""
  43. return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
  44. def get_node_by_id(label, id):
  45. """根据ID获取指定标签的节点"""
  46. try:
  47. with neo4j_driver.get_session() as session:
  48. # 确保id为整数
  49. try:
  50. id_int = int(id)
  51. except (ValueError, TypeError):
  52. logger.error(f"节点ID不是有效的整数: {id}")
  53. return None
  54. cypher = f"MATCH (n:{label}) WHERE id(n) = $id RETURN n "
  55. result = session.run(cypher, id=id_int)
  56. record = result.single()
  57. return record["n"] if record else None
  58. except Exception as e:
  59. logger.error(f"根据ID获取节点失败: {str(e)}")
  60. return None
  61. def get_node_by_id_no_label(id):
  62. """根据ID获取节点,不限制标签"""
  63. try:
  64. with neo4j_driver.get_session() as session:
  65. # 确保id为整数
  66. try:
  67. id_int = int(id)
  68. except (ValueError, TypeError):
  69. logger.error(f"节点ID不是有效的整数: {id}")
  70. return None
  71. cypher = "MATCH (n) WHERE id(n) = $id RETURN n"
  72. result = session.run(cypher, id=id_int)
  73. record = result.single()
  74. return record["n"] if record else None
  75. except Exception as e:
  76. logger.error(f"根据ID获取节点失败: {str(e)}")
  77. return None
  78. def delete_relationships(start_node, rel_type=None, end_node=None):
  79. """删除关系"""
  80. try:
  81. with neo4j_driver.get_session() as session:
  82. if rel_type and end_node:
  83. cypher = "MATCH (a)-[r:`{rel_type}`]->(b) WHERE id(a) = $start_id AND id(b) = $end_id DELETE r"
  84. cypher = cypher.replace("{rel_type}", rel_type)
  85. session.run(cypher, start_id=start_node.id, end_id=end_node.id)
  86. elif rel_type:
  87. cypher = "MATCH (a)-[r:`{rel_type}`]->() WHERE id(a) = $start_id DELETE r"
  88. cypher = cypher.replace("{rel_type}", rel_type)
  89. session.run(cypher, start_id=start_node.id)
  90. else:
  91. cypher = "MATCH (a)-[r]->() WHERE id(a) = $start_id DELETE r"
  92. session.run(cypher, start_id=start_node.id)
  93. return True
  94. except Exception as e:
  95. logger.error(f"删除关系失败: {str(e)}")
  96. return False
  97. def update_or_create_node(label, **properties):
  98. """更新或创建节点"""
  99. try:
  100. with neo4j_driver.get_session() as session:
  101. node_id = properties.pop('id', None)
  102. if node_id:
  103. # 更新现有节点
  104. set_clause = ", ".join([f"n.{k} = ${k}" for k in properties.keys()])
  105. cypher = f"MATCH (n:{label}) WHERE id(n) = $id SET {set_clause} RETURN n"
  106. result = session.run(cypher, id=int(node_id), **properties)
  107. else:
  108. # 创建新节点
  109. props_str = ", ".join([f"{k}: ${k}" for k in properties.keys()])
  110. cypher = f"CREATE (n:{label} {{{props_str}}}) RETURN n"
  111. result = session.run(cypher, **properties)
  112. record = result.single()
  113. return record["n"] if record else None
  114. except Exception as e:
  115. logger.error(f"更新或创建节点失败: {str(e)}")
  116. return None
# Data resource <-> metadata relationship creation and lookup
def handle_node(receiver, head_data, data_source=None, resource_type=None):
    """
    Create/update a DataResource node and wire up its relationships.

    Args:
        receiver (dict): Resource properties; mutated in place ('tag',
            'additional_info' and 'data_source' keys are consumed, audit
            attributes name_en/create_time/type are added).
        head_data (list | None): Field definitions; each item needs
            name_zh, name_en and data_type and becomes a DataMeta node
            linked via INCLUDES.
        data_source: Optional data-source reference — a numeric node id,
            a dict carrying name_en, or a bare name_en string; linked via
            COME_FROM. Failures here are logged but never abort the flow.
        resource_type (str): Required resource type stored on the node.

    Returns:
        int: Internal Neo4j id of the created/updated DataResource node.

    Raises:
        ValueError: When resource_type is missing.
        Exception: Any other failure is logged and re-raised.
    """
    try:
        # Validate required parameters
        if not resource_type:
            raise ValueError("resource_type参数不能为空")
        # Attributes stamped onto every create/update
        update_attributes = {
            'name_en': receiver.get('name_en', receiver.get('name_zh', '')),
            'create_time': get_formatted_time(),
            'type': resource_type  # use the caller-supplied resource type verbatim
        }
        # Log whether the describe field is present in the incoming payload
        if "describe" in receiver:
            logger.info(f"创建资源,describe字段将被设置为: {receiver.get('describe')}")
        else:
            logger.info("创建资源,describe字段不在创建数据中")
        if 'additional_info' in receiver:
            del receiver['additional_info']
        # Drop data_source so a complex object never becomes a node property
        if 'data_source' in receiver:
            del receiver['data_source']
        tag_list = receiver.get('tag')
        receiver.update(update_attributes)
        # Create or fetch the DataResource node (keyed by name_zh)
        with neo4j_driver.get_session() as session:
            props_str = ", ".join([f"{k}: ${k}" for k in receiver.keys()])
            cypher = f"""
            MERGE (n:DataResource {{name_zh: $name_zh}})
            ON CREATE SET n = {{{props_str}}}
            ON MATCH SET {", ".join([f"n.{k} = ${k}" for k in receiver.keys()])}
            RETURN n
            """
            result = session.run(cypher, **receiver)
            data_resource_node = result.single()["n"]
            resource_id = data_resource_node.id  # numeric internal id
            # Log the node as persisted
            logger.info(f"创建后的节点数据,describe字段: {data_resource_node.get('describe')}")
            # Tag (DataLabel) relationship
            if tag_list:
                tag_node = get_node_by_id('DataLabel', tag_list)
                if tag_node:
                    # Check whether the LABEL relationship already exists
                    rel_check = """
                    MATCH (a:DataResource)-[r:LABEL]->(b:DataLabel)
                    WHERE id(a) = $resource_id AND id(b) = $tag_id
                    RETURN r
                    """
                    rel_result = session.run(rel_check, resource_id=resource_id, tag_id=tag_node.id)  # numeric ids
                    # Create it only when missing
                    if not rel_result.single():
                        rel_create = """
                        MATCH (a:DataResource), (b:DataLabel)
                        WHERE id(a) = $resource_id AND id(b) = $tag_id
                        CREATE (a)-[r:LABEL]->(b)
                        RETURN r
                        """
                        session.run(rel_create, resource_id=resource_id, tag_id=tag_node.id)
            # Header data (metadata / field definitions)
            if head_data:
                for item in head_data:
                    # Create or refresh the metadata node
                    meta_cypher = """
                    MERGE (m:DataMeta {name_zh: $name_zh})
                    ON CREATE SET m.name_en = $name_en,
                    m.create_time = $create_time,
                    m.data_type = $data_type,
                    m.status = true
                    ON MATCH SET m.data_type = $data_type,
                    m.status = true
                    RETURN m
                    """
                    create_time = get_formatted_time()
                    meta_result = session.run(
                        meta_cypher,
                        name_zh=item['name_zh'],
                        name_en=item['name_en'],
                        create_time=create_time,
                        data_type=item['data_type']  # stored as the data_type property
                    )
                    meta_record = meta_result.single()
                    if meta_record and meta_record["m"]:
                        meta_node = meta_record["m"]
                        meta_id = meta_node.id  # numeric id
                        # Confirm node creation and id
                        logger.info(f"创建或获取到元数据节点: ID={meta_id}, name_zh={item['name_zh']}")
                        # Sanity-check that the resource node is still reachable
                        check_resource_cypher = """
                        MATCH (n:DataResource)
                        WHERE id(n) = $resource_id
                        RETURN n
                        """
                        check_resource = session.run(check_resource_cypher, resource_id=resource_id)
                        if check_resource.single():
                            logger.info(f"找到数据资源节点: ID={resource_id}")
                        else:
                            logger.error(f"无法找到数据资源节点: ID={resource_id}")
                            continue
                        # Link resource -> metadata
                        rel_cypher = """
                        MATCH (a:DataResource), (m:DataMeta)
                        WHERE id(a) = $resource_id AND id(m) = $meta_id
                        MERGE (a)-[r:INCLUDES]->(m)
                        RETURN r
                        """
                        rel_result = session.run(
                            rel_cypher,
                            resource_id=resource_id,
                            meta_id=meta_id
                        )
                        rel_record = rel_result.single()
                        if rel_record:
                            logger.info(f"成功创建数据资源与元数据的关系: {resource_id} -> {meta_id}")
                        else:
                            logger.warning(f"创建数据资源与元数据的关系失败: {resource_id} -> {meta_id}")
                    else:
                        logger.error(f"未能创建或获取元数据节点: {item['name_zh']}")
            # Data-source relationship — supported for every resource type
            if data_source:
                try:
                    # Resolve the data-source identifier (several accepted formats)
                    data_source_id = None
                    data_source_name_en = None
                    # 1. A number (node id)
                    if isinstance(data_source, (int, float)) or (isinstance(data_source, str) and data_source.isdigit()):
                        data_source_id = int(data_source)
                        logger.info(f"data_source 为节点ID: {data_source_id}")
                    # 2. A dict carrying name_en
                    elif isinstance(data_source, dict) and data_source.get('name_en'):
                        data_source_name_en = data_source['name_en']
                        logger.info(f"data_source 为字典,提取name_en: {data_source_name_en}")
                    # 3. A plain string (name_en)
                    elif isinstance(data_source, str):
                        data_source_name_en = data_source
                        logger.info(f"data_source 为字符串name_en: {data_source_name_en}")
                    # Link resource -> data source
                    if data_source_id is not None:
                        # Relate via node id; verify the DataSource node exists first
                        check_ds_cypher = "MATCH (b:DataSource) WHERE id(b) = $ds_id RETURN b"
                        check_ds_result = session.run(check_ds_cypher, ds_id=data_source_id)
                        if not check_ds_result.single():
                            logger.warning(f"数据源节点不存在: ID={data_source_id},跳过关系创建")
                        else:
                            # Create the COME_FROM relationship
                            rel_data_source_cypher = """
                            MATCH (a:DataResource), (b:DataSource)
                            WHERE id(a) = $resource_id AND id(b) = $ds_id
                            MERGE (a)-[r:COME_FROM]->(b)
                            RETURN r
                            """
                            rel_result = session.run(
                                rel_data_source_cypher,
                                resource_id=resource_id,
                                ds_id=data_source_id
                            )
                            rel_record = rel_result.single()
                            if rel_record:
                                logger.info(f"已创建数据资源与数据源的COME_FROM关系: resource_id={resource_id} -> data_source_id={data_source_id}")
                            else:
                                logger.warning(f"创建COME_FROM关系失败,但不中断主流程: {resource_id} -> {data_source_id}")
                    elif data_source_name_en:
                        # Relate via name_en (legacy behaviour); verify existence first
                        check_ds_cypher = "MATCH (b:DataSource) WHERE b.name_en = $ds_name_en RETURN b"
                        check_ds_result = session.run(check_ds_cypher, ds_name_en=data_source_name_en)
                        if not check_ds_result.single():
                            logger.warning(f"数据源节点不存在: name_en={data_source_name_en},跳过关系创建")
                        else:
                            # Create the COME_FROM relationship
                            rel_data_source_cypher = """
                            MATCH (a:DataResource), (b:DataSource)
                            WHERE id(a) = $resource_id AND b.name_en = $ds_name_en
                            MERGE (a)-[r:COME_FROM]->(b)
                            RETURN r
                            """
                            rel_result = session.run(
                                rel_data_source_cypher,
                                resource_id=resource_id,
                                ds_name_en=data_source_name_en
                            )
                            rel_record = rel_result.single()
                            if rel_record:
                                logger.info(f"已创建数据资源与数据源的COME_FROM关系: resource_id={resource_id} -> name_en={data_source_name_en}")
                            else:
                                logger.warning(f"创建COME_FROM关系失败,但不中断主流程: {resource_id} -> {data_source_name_en}")
                    else:
                        logger.warning(f"data_source参数无效,无法识别格式: {data_source}")
                except Exception as e:
                    # A data-source relationship failure must not abort the main flow
                    logger.error(f"处理数据源关系失败(不中断主流程): {str(e)}")
                    # Deliberately swallowed: the main flow continues
        return resource_id
    except Exception as e:
        logger.error(f"处理数据资源节点创建和关系建立失败: {str(e)}")
        raise
def handle_id_resource(resource_id):
    """
    Fetch a single DataResource with its tag, data source and metadata.

    Args:
        resource_id: Internal Neo4j id (anything int()-convertible).

    Returns:
        dict | None: Serialized resource with 'tag', 'data_source' and
        'parsed_data' keys plus defaulted required fields, or None on
        invalid id / missing node / error.
    """
    try:
        with neo4j_driver.get_session() as session:
            # Coerce resource_id to int
            try:
                resource_id_int = int(resource_id)
            except (ValueError, TypeError):
                logger.error(f"资源ID不是有效的整数: {resource_id}")
                return None
            # Look the node up by numeric id
            cypher = """
            MATCH (n:DataResource)
            WHERE id(n) = $resource_id
            RETURN n
            """
            result = session.run(cypher, resource_id=resource_id_int)
            record = result.single()
            if not record:
                logger.error(f"未找到资源,ID: {resource_id_int}")
                return None
            # Build the response payload
            logger.info(f"record: {record}")
            data_resource = serialize_node_properties(record["n"])
            logger.info(f"data_resource: {data_resource}")
            logger.info(f"describe field in node: {record['n'].get('describe')}")
            # Keep the describe field even when its value is null
            if 'describe' in record["n"]:
                data_resource["describe"] = record["n"].get('describe')
                logger.info(f"设置describe字段: {data_resource['describe']}")
            data_resource["id"] = record["n"].id
            # Associated tag
            tag_cypher = """
            MATCH (n:DataResource)-[r:LABEL]->(t:DataLabel)
            WHERE id(n) = $resource_id
            RETURN t
            """
            tag_result = session.run(tag_cypher, resource_id=resource_id_int)
            tag_record = tag_result.single()
            # Tag info (None placeholders when absent)
            if tag_record:
                tag = {
                    "name_zh": tag_record["t"].get("name_zh"),
                    "id": tag_record["t"].id
                }
            else:
                tag = {
                    "name_zh": None,
                    "id": None
                }
            data_resource["tag"] = tag
            # Associated data source (COME_FROM relationship)
            data_source_cypher = """
            MATCH (n:DataResource)-[r:COME_FROM]->(ds:DataSource)
            WHERE id(n) = $resource_id
            RETURN ds
            """
            data_source_result = session.run(data_source_cypher, resource_id=resource_id_int)
            data_source_record = data_source_result.single()
            # Data-source info (node id only)
            if data_source_record:
                data_resource["data_source"] = data_source_record["ds"].id
                logger.info(f"找到关联的数据源,ID: {data_source_record['ds'].id}")
            else:
                data_resource["data_source"] = None
                logger.info(f"未找到关联的数据源")
            # Associated metadata — both DataMeta and Metadata labels supported
            meta_cypher = """
            MATCH (n:DataResource)-[:INCLUDES]->(m)
            WHERE id(n) = $resource_id
            AND (m:DataMeta OR m:Metadata)
            RETURN m
            """
            meta_result = session.run(meta_cypher, resource_id=resource_id_int)
            parsed_data = []
            for meta_record in meta_result:
                meta = serialize_node_properties(meta_record["m"])
                meta_data = {
                    "id": meta_record["m"].id,
                    "name_zh": meta.get("name_zh"),
                    "name_en": meta.get("name_en"),
                    "data_type": meta.get("data_type"),
                    "data_standard": {
                        "name_zh": None,
                        "id": None
                    }
                }
                parsed_data.append(meta_data)
            data_resource["parsed_data"] = parsed_data
            # Defaults so every required field is present in the response
            required_fields = {
                "leader": "",
                "organization": "",
                "name_zh": "",
                "name_en": "",
                "data_sensitivity": "",
                "storage_location": "/",
                "create_time": "",
                "type": "",
                "category": "",
                "url": "",
                "frequency": "",
                "status": True,
                "keywords": [],
                "describe": ""
            }
            for field, default_value in required_fields.items():
                if field not in data_resource or data_resource[field] is None:
                    data_resource[field] = default_value
            logger.info(f"成功获取资源详情,ID: {resource_id_int}, describe: {data_resource.get('describe')}")
            return data_resource
    except Exception as e:
        logger.error(f"处理单个数据资源查询失败: {str(e)}")
        return None
  428. def id_resource_graph(resource_id):
  429. """获取数据资源图谱"""
  430. try:
  431. with neo4j_driver.get_session() as session:
  432. # 查询数据资源节点及其关系
  433. cypher = """
  434. MATCH (n:DataResource)-[r]-(m)
  435. WHERE id(n) = $resource_id
  436. RETURN n, r, m
  437. """
  438. result = session.run(cypher, resource_id=int(resource_id))
  439. # 收集节点和关系
  440. nodes = {}
  441. relationships = []
  442. for record in result:
  443. # 处理源节点
  444. source_node = serialize_node_properties(record["n"])
  445. source_node["id"] = record["n"].id
  446. nodes[source_node["id"]] = source_node
  447. # 处理目标节点
  448. target_node = serialize_node_properties(record["m"])
  449. target_node["id"] = record["m"].id
  450. nodes[target_node["id"]] = target_node
  451. # 处理关系
  452. rel = record["r"]
  453. relationship = {
  454. "id": rel.id,
  455. "source": record["n"].id,
  456. "target": record["m"].id,
  457. "type": rel.type
  458. }
  459. relationships.append(relationship)
  460. return {
  461. "nodes": list(nodes.values()),
  462. "relationships": relationships
  463. }
  464. except Exception as e:
  465. logger.error(f"获取数据资源图谱失败: {str(e)}")
  466. return {"nodes": [], "relationships": []}
  467. def resource_list(page, page_size, name_en_filter=None, name_zh_filter=None,
  468. type_filter='all', category_filter=None, tag_filter=None):
  469. """获取数据资源列表"""
  470. try:
  471. with neo4j_driver.get_session() as session:
  472. # 构建基础过滤条件(针对DataResource节点)
  473. resource_conditions = []
  474. if name_en_filter:
  475. resource_conditions.append(f"n.name_en CONTAINS '{name_en_filter}'")
  476. if name_zh_filter:
  477. resource_conditions.append(f"n.name_zh CONTAINS '{name_zh_filter}'")
  478. if type_filter and type_filter != 'all':
  479. resource_conditions.append(f"n.type = '{type_filter}'")
  480. if category_filter:
  481. resource_conditions.append(f"n.category = '{category_filter}'")
  482. # 构建基础WHERE子句
  483. resource_where = " AND ".join(resource_conditions) if resource_conditions else ""
  484. # 根据是否有tag_filter选择不同的查询策略
  485. if tag_filter:
  486. # 有标签过滤:先过滤DataResource,再连接标签
  487. if resource_where:
  488. # 计算总数
  489. count_cypher = f"""
  490. MATCH (n:DataResource)
  491. WHERE {resource_where}
  492. WITH n
  493. MATCH (n)-[:LABEL]->(t:DataLabel)
  494. WHERE t.name_zh = '{tag_filter}'
  495. RETURN count(DISTINCT n) as count
  496. """
  497. # 分页查询
  498. skip = (page - 1) * page_size
  499. cypher = f"""
  500. MATCH (n:DataResource)
  501. WHERE {resource_where}
  502. WITH n
  503. MATCH (n)-[:LABEL]->(t:DataLabel)
  504. WHERE t.name_zh = '{tag_filter}'
  505. RETURN DISTINCT n
  506. ORDER BY n.create_time DESC
  507. SKIP {skip} LIMIT {page_size}
  508. """
  509. else:
  510. # 只有标签过滤条件
  511. count_cypher = f"""
  512. MATCH (n:DataResource)-[:LABEL]->(t:DataLabel)
  513. WHERE t.name_zh = '{tag_filter}'
  514. RETURN count(DISTINCT n) as count
  515. """
  516. # 分页查询
  517. skip = (page - 1) * page_size
  518. cypher = f"""
  519. MATCH (n:DataResource)-[:LABEL]->(t:DataLabel)
  520. WHERE t.name_zh = '{tag_filter}'
  521. RETURN DISTINCT n
  522. ORDER BY n.create_time DESC
  523. SKIP {skip} LIMIT {page_size}
  524. """
  525. else:
  526. # 无标签过滤:标准查询
  527. if resource_where:
  528. # 计算总数
  529. count_cypher = f"""
  530. MATCH (n:DataResource)
  531. WHERE {resource_where}
  532. RETURN count(n) as count
  533. """
  534. # 分页查询
  535. skip = (page - 1) * page_size
  536. cypher = f"""
  537. MATCH (n:DataResource)
  538. WHERE {resource_where}
  539. RETURN n
  540. ORDER BY n.create_time DESC
  541. SKIP {skip} LIMIT {page_size}
  542. """
  543. else:
  544. # 无任何过滤条件
  545. count_cypher = "MATCH (n:DataResource) RETURN count(n) as count"
  546. # 分页查询
  547. skip = (page - 1) * page_size
  548. cypher = f"""
  549. MATCH (n:DataResource)
  550. RETURN n
  551. ORDER BY n.create_time DESC
  552. SKIP {skip} LIMIT {page_size}
  553. """
  554. # 执行计数查询
  555. count_result = session.run(count_cypher)
  556. total_count = count_result.single()["count"]
  557. # 执行分页查询
  558. result = session.run(cypher)
  559. # 格式化结果
  560. resources = []
  561. for record in result:
  562. node = serialize_node_properties(record["n"])
  563. node["id"] = record["n"].id
  564. # 查询关联的标签
  565. tag_cypher = """
  566. MATCH (n:DataResource)-[r:LABEL]->(t:DataLabel)
  567. WHERE id(n) = $resource_id
  568. RETURN t
  569. """
  570. tag_result = session.run(tag_cypher, resource_id=node["id"])
  571. tag_record = tag_result.single()
  572. if tag_record:
  573. tag = serialize_node_properties(tag_record["t"])
  574. tag["id"] = tag_record["t"].id
  575. node["tag_info"] = tag
  576. resources.append(node)
  577. return resources, total_count
  578. except Exception as e:
  579. logger.error(f"获取数据资源列表失败: {str(e)}")
  580. return [], 0
  581. def id_data_search_list(resource_id, page, page_size, name_en_filter=None,
  582. name_zh_filter=None, category_filter=None, tag_filter=None):
  583. """获取特定数据资源关联的元数据列表"""
  584. try:
  585. with neo4j_driver.get_session() as session:
  586. # 确保resource_id为整数
  587. try:
  588. resource_id_int = int(resource_id)
  589. except (ValueError, TypeError):
  590. logger.error(f"资源ID不是有效的整数: {resource_id}")
  591. return [], 0
  592. # 基本匹配语句 - 支持meta_data和Metadata标签
  593. match_clause = """
  594. MATCH (n:DataResource)-[:INCLUDES]->(m)
  595. WHERE id(n) = $resource_id
  596. AND (m:DataMeta OR m:Metadata)
  597. """
  598. where_conditions = []
  599. if name_en_filter:
  600. where_conditions.append(f"m.name_en CONTAINS '{name_en_filter}'")
  601. if name_zh_filter:
  602. where_conditions.append(f"m.name_zh CONTAINS '{name_zh_filter}'")
  603. if category_filter:
  604. where_conditions.append(f"m.category = '{category_filter}'")
  605. # 标签过滤需要额外的匹配
  606. tag_match = ""
  607. if tag_filter:
  608. tag_match = "MATCH (m)-[:HAS_TAG]->(t:Tag) WHERE t.name_zh = $tag_filter"
  609. where_clause = " AND " + " AND ".join(where_conditions) if where_conditions else ""
  610. # 计算总数
  611. count_cypher = f"""
  612. {match_clause}{where_clause}
  613. {tag_match}
  614. RETURN count(m) as count
  615. """
  616. count_params = {"resource_id": resource_id_int}
  617. if tag_filter:
  618. count_params["tag_filter"] = tag_filter
  619. count_result = session.run(count_cypher, **count_params)
  620. total_count = count_result.single()["count"]
  621. # 分页查询
  622. skip = (page - 1) * page_size
  623. cypher = f"""
  624. {match_clause}{where_clause}
  625. {tag_match}
  626. RETURN m
  627. ORDER BY m.name_zh
  628. SKIP {skip} LIMIT {page_size}
  629. """
  630. result = session.run(cypher, **count_params)
  631. # 格式化结果
  632. metadata_list = []
  633. for record in result:
  634. meta = serialize_node_properties(record["m"])
  635. meta["id"] = record["m"].id
  636. metadata_list.append(meta)
  637. logger.info(f"成功获取资源关联元数据,ID: {resource_id_int}, 元数据数量: {total_count}")
  638. return metadata_list, total_count
  639. except Exception as e:
  640. logger.error(f"获取数据资源关联的元数据列表失败: {str(e)}")
  641. return [], 0
  642. def resource_kinship_graph(resource_id, include_meta=True):
  643. """获取数据资源亲缘关系图谱"""
  644. try:
  645. with neo4j_driver.get_session() as session:
  646. # 确保resource_id为整数
  647. try:
  648. resource_id_int = int(resource_id)
  649. except (ValueError, TypeError):
  650. logger.error(f"资源ID不是有效的整数: {resource_id}")
  651. return {"nodes": [], "relationships": []}
  652. # 基本查询
  653. cypher_parts = [
  654. f"MATCH (n:DataResource) WHERE id(n) = $resource_id",
  655. "OPTIONAL MATCH (n)-[:LABEL]->(l:DataLabel)",
  656. ]
  657. # 是否包含元数据 - 支持meta_data和Metadata两种标签
  658. if include_meta:
  659. cypher_parts.append("OPTIONAL MATCH (n)-[:INCLUDES]->(m) WHERE (m:DataMeta OR m:Metadata)")
  660. cypher_parts.append("RETURN n, l, collect(m) as metadata")
  661. cypher = "\n".join(cypher_parts)
  662. result = session.run(cypher, resource_id=resource_id_int)
  663. record = result.single()
  664. if not record:
  665. logger.error(f"未找到资源图谱数据,ID: {resource_id_int}")
  666. return {"nodes": [], "relationships": []}
  667. # 收集节点和关系
  668. nodes = {}
  669. relationships = []
  670. # 处理数据资源节点
  671. resource_node = serialize_node_properties(record["n"])
  672. resource_node["id"] = record["n"].id
  673. resource_node["node_type"] = list(record["n"].labels)
  674. nodes[resource_node["id"]] = resource_node
  675. # 处理标签节点
  676. if record["l"]:
  677. label_node = serialize_node_properties(record["l"])
  678. label_node["id"] = record["l"].id
  679. label_node["node_type"] = list(record["l"].labels)
  680. nodes[label_node["id"]] = label_node
  681. # 添加资源-标签关系
  682. relationships.append({
  683. "id": f"rel-{resource_node['id']}-label-{label_node['id']}",
  684. "from": resource_node["id"],
  685. "to": label_node["id"],
  686. "text": "label"
  687. })
  688. # 处理元数据节点
  689. if include_meta and record["metadata"]:
  690. for meta in record["metadata"]:
  691. if meta: # 检查元数据节点是否存在
  692. meta_node = serialize_node_properties(meta)
  693. meta_node["id"] = meta.id
  694. meta_node["node_type"] = list(meta.labels)
  695. nodes[meta_node["id"]] = meta_node
  696. # 添加资源-元数据关系
  697. relationships.append({
  698. "id": f"rel-{resource_node['id']}-INCLUDES-{meta_node['id']}",
  699. "from": resource_node["id"],
  700. "to": meta_node["id"],
  701. "text": "INCLUDES"
  702. })
  703. logger.info(f"成功获取资源图谱,ID: {resource_id_int}, 节点数: {len(nodes)}")
  704. return {
  705. "nodes": list(nodes.values()),
  706. "relationships": relationships
  707. }
  708. except Exception as e:
  709. logger.error(f"获取数据资源亲缘关系图谱失败: {str(e)}")
  710. return {"nodes": [], "relationships": []}
  711. def resource_impact_all_graph(resource_id, include_meta=True):
  712. """获取数据资源影响关系图谱"""
  713. try:
  714. with neo4j_driver.get_session() as session:
  715. # 确保resource_id为整数
  716. try:
  717. resource_id_int = int(resource_id)
  718. except (ValueError, TypeError):
  719. logger.error(f"资源ID不是有效的整数: {resource_id}")
  720. return {"nodes": [], "lines": []}
  721. # 根据meta参数决定查询深度,限制为一层
  722. if include_meta:
  723. cypher = """
  724. MATCH path = (n:DataResource)-[*1..1]-(m)
  725. WHERE id(n) = $resource_id
  726. RETURN path
  727. """
  728. else:
  729. cypher = """
  730. MATCH path = (n:DataResource)-[*1..1]-(m)
  731. WHERE id(n) = $resource_id
  732. AND NOT (m:DataMeta) AND NOT (m:Metadata)
  733. RETURN path
  734. """
  735. result = session.run(cypher, resource_id=resource_id_int)
  736. # 收集节点和关系
  737. nodes = {}
  738. lines = {}
  739. for record in result:
  740. path = record["path"]
  741. # 处理路径中的所有节点
  742. for node in path.nodes:
  743. if node.id not in nodes:
  744. node_dict = serialize_node_properties(node)
  745. node_dict["id"] = str(node.id)
  746. node_dict["node_type"] = list(node.labels)[0] if node.labels else ""
  747. nodes[node.id] = node_dict
  748. # 处理路径中的所有关系。Neo4j的路径对象(path)中,关系集合的属性名是relationships
  749. for rel in path.relationships:
  750. if rel.id not in lines:
  751. rel_dict = {
  752. "id": str(rel.id),
  753. "from": str(rel.start_node.id),
  754. "to": str(rel.end_node.id),
  755. "text": rel.type
  756. }
  757. lines[rel.id] = rel_dict
  758. logger.info(f"成功获取完整图谱,ID: {resource_id_int}, 节点数: {len(nodes)}")
  759. return {
  760. "nodes": list(nodes.values()),
  761. "lines": list(lines.values())
  762. }
  763. except Exception as e:
  764. logger.error(f"获取数据资源影响关系图谱失败: {str(e)}")
  765. return {"nodes": [], "lines": []}
  766. def clean_type(type_str):
  767. """清洗SQL类型字符串"""
  768. # 提取基本类型,不包括长度或精度信息
  769. basic_type = re.sub(r'\(.*?\)', '', type_str).strip().upper()
  770. # 移除 VARYING 这样的后缀
  771. basic_type = re.sub(r'\s+VARYING$', '', basic_type)
  772. # 标准化常见类型
  773. type_mapping = {
  774. 'INT': 'INTEGER',
  775. 'INT4': 'INTEGER',
  776. 'INT8': 'BIGINT',
  777. 'SMALLINT': 'SMALLINT',
  778. 'BIGINT': 'BIGINT',
  779. 'FLOAT4': 'FLOAT',
  780. 'FLOAT8': 'DOUBLE',
  781. 'REAL': 'FLOAT',
  782. 'DOUBLE PRECISION': 'DOUBLE',
  783. 'NUMERIC': 'DECIMAL',
  784. 'BOOL': 'BOOLEAN',
  785. 'CHARACTER': 'CHAR',
  786. 'CHAR VARYING': 'VARCHAR',
  787. 'CHARACTER VARYING': 'VARCHAR',
  788. 'TEXT': 'TEXT',
  789. 'DATE': 'DATE',
  790. 'TIME': 'TIME',
  791. 'TIMESTAMP': 'TIMESTAMP',
  792. 'TIMESTAMPTZ': 'TIMESTAMP WITH TIME ZONE',
  793. 'BYTEA': 'BINARY',
  794. 'JSON': 'JSON',
  795. 'JSONB': 'JSONB',
  796. 'UUID': 'UUID',
  797. 'SERIAL': 'SERIAL',
  798. 'SERIAL4': 'SERIAL',
  799. 'SERIAL8': 'BIGSERIAL',
  800. 'BIGSERIAL': 'BIGSERIAL'
  801. }
  802. # 尝试从映射表中获取标准化的类型
  803. return type_mapping.get(basic_type, basic_type)
  804. def clean_field_name(field_name):
  805. """清洗字段名"""
  806. return field_name.strip('`').strip('"').strip("'")
def select_create_ddl(sql_content):
    """Extract CREATE TABLE DDL statements (plus their associated COMMENT ON
    statements) from a SQL script.

    The script is first split into individual statements on semicolons
    (quote-aware), then statements are grouped: each CREATE TABLE starts a
    block, and subsequent COMMENT ON TABLE / COMMENT ON COLUMN statements
    that reference the same table name are appended to that block.

    Args:
        sql_content: full text of a SQL file.

    Returns:
        A list of strings, one per table, each containing the CREATE TABLE
        statement followed by its matching COMMENT statements; an empty list
        on failure.
    """
    try:
        # Pass 1: split the SQL content on semicolons, tracking quoted
        # strings so that semicolons inside string literals do not split.
        statements = []
        current_statement = ""
        in_string = False
        string_quote = None
        for char in sql_content:
            if char in ["'", '"']:
                if not in_string:
                    in_string = True
                    string_quote = char
                elif char == string_quote:
                    # Closing quote of the current string literal.
                    in_string = False
                    string_quote = None
                current_statement += char
            elif char == ';' and not in_string:
                # Statement terminator outside any string literal.
                current_statement += char
                if current_statement.strip():
                    statements.append(current_statement.strip())
                current_statement = ""
            else:
                current_statement += char
        # Trailing statement without a terminating semicolon.
        if current_statement.strip():
            statements.append(current_statement.strip())
        # Pass 2: group CREATE TABLE statements with their COMMENT statements.
        create_table_statements = []
        create_index = -1  # NOTE(review): never used; candidate for removal.
        in_table_block = False
        current_table = None   # table name of the block being accumulated
        current_block = ""     # accumulated DDL text for the current table
        for i, stmt in enumerate(statements):
            if re.search(r'^\s*CREATE\s+TABLE', stmt, re.IGNORECASE):
                # A new CREATE TABLE: flush any block already in progress.
                if in_table_block and current_block:
                    create_table_statements.append(current_block)
                # Start a new table block.
                in_table_block = True
                current_block = stmt
                # Extract the (optionally schema-qualified, optionally
                # quoted) table name for comment matching below.
                table_match = re.search(r'CREATE\s+TABLE\s+(?:(?:"[^"]+"|\'[^\']+\'|[^"\'\s\.]+)\.)?(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\(]+))', stmt, re.IGNORECASE)
                if table_match:
                    current_table = table_match.group(1) or table_match.group(2) or table_match.group(3)
                    current_table = current_table.strip('"\'') if current_table else ""
            elif in_table_block and (re.search(r'COMMENT\s+ON\s+TABLE', stmt, re.IGNORECASE) or
                                     re.search(r'COMMENT\s+ON\s+COLUMN', stmt, re.IGNORECASE)):
                # Only attach comments when we know which table we are in.
                if current_table:
                    # Table-level comment handling.
                    if re.search(r'COMMENT\s+ON\s+TABLE', stmt, re.IGNORECASE):
                        table_comment_match = re.search(r'COMMENT\s+ON\s+TABLE\s+[\'"]?(\w+)[\'"]?', stmt, re.IGNORECASE)
                        if table_comment_match:
                            comment_table = table_comment_match.group(1).strip('"\'')
                            if comment_table == current_table:
                                current_block += " " + stmt
                            else:
                                # Comment for a different table: the current
                                # table's DDL ends here.
                                create_table_statements.append(current_block)
                                in_table_block = False
                                current_block = ""
                                current_table = None
                    # Column-level comment handling.
                    elif re.search(r'COMMENT\s+ON\s+COLUMN', stmt, re.IGNORECASE):
                        column_comment_match = re.search(
                            r'COMMENT\s+ON\s+COLUMN\s+[\'"]?(\w+)[\'"]?\.[\'"]?(\w+)[\'"]?\s+IS\s+\'([^\']+)\'',
                            stmt,
                            re.IGNORECASE
                        )
                        if column_comment_match:
                            comment_table = column_comment_match.group(1)
                            if comment_table == current_table:
                                current_block += " " + stmt
                            else:
                                # Comment for a different table: the current
                                # table's DDL ends here.
                                create_table_statements.append(current_block)
                                in_table_block = False
                                current_block = ""
                                current_table = None
            elif in_table_block and re.search(r'^\s*CREATE\s+', stmt, re.IGNORECASE):
                # Any other CREATE statement (index, view, ...) terminates
                # the current table block.
                create_table_statements.append(current_block)
                in_table_block = False
                current_block = ""
                current_table = None
        # Flush the final block, if any.
        if in_table_block and current_block:
            create_table_statements.append(current_block)
        # Diagnostic logging of the extracted DDL blocks.
        logger.debug(f"提取到 {len(create_table_statements)} 个DDL语句")
        for i, stmt in enumerate(create_table_statements):
            logger.debug(f"DDL语句 {i+1}: {stmt}")
        return create_table_statements
    except Exception as e:
        logger.error(f"提取DDL语句失败: {str(e)}")
        # logger.error(traceback.format_exc())
        return []
def table_sql(sql):
    """Parse a CREATE TABLE DDL statement into a field/metadata description.

    Supports table names with or without a schema qualifier, each part
    optionally wrapped in double or single quotes:
    ``CREATE TABLE t``, ``CREATE TABLE "t"``, ``CREATE TABLE 't'``,
    ``CREATE TABLE s.t``, ``CREATE TABLE "s"."t"``, ``CREATE TABLE 's'.'t'``.

    Args:
        sql: the DDL text, optionally followed by COMMENT ON TABLE/COLUMN
            statements for the same table.

    Returns:
        ``{table_name: {"exist": bool, "meta": [{"name_en", "data_type",
        "name_zh"}, ...]}}`` on success, or ``None`` when parsing fails.
        ``exist`` is looked up in the graph via ``status_query``.
    """
    try:
        # Match the table name; supports quoted and unquoted forms with an
        # optional schema prefix (groups 1-3 schema, groups 4-6 table).
        table_pattern = r'CREATE\s+TABLE\s+(?:(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\.]+))\.)?(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\(]+))'
        table_match = re.search(table_pattern, sql, re.IGNORECASE)
        if not table_match:
            logger.error(f"无法匹配CREATE TABLE语句: {sql[:100]}...")
            return None
        # Extract schema and table name from whichever alternative matched.
        # NOTE(review): ``schema`` is captured but not used further down.
        schema = table_match.group(1) or table_match.group(2) or table_match.group(3)
        table_name = table_match.group(4) or table_match.group(5) or table_match.group(6)
        if not table_name:
            logger.error("无法解析表名")
            return None
        logger.debug(f"解析到表名: {table_name}")
        # Extract the body of the CREATE TABLE statement (the content of the
        # outer parentheses, up to the terminating ';' or end of string).
        body_pattern = r'CREATE\s+TABLE\s+[^(]*\((.*?)\)(?=\s*;|\s*$)'
        body_match = re.search(body_pattern, sql, re.DOTALL | re.IGNORECASE)
        if not body_match:
            logger.error("无法提取表主体内容")
            return None
        body_text = body_match.group(1).strip()
        logger.debug(f"表定义主体部分: {body_text}")
        # Parse the column definitions.
        fields = []
        # Split the body on top-level commas, honouring nested parentheses
        # (e.g. NUMERIC(10,2)) and quoted identifiers/literals.
        field_defs = []
        pos = 0
        in_parentheses = 0
        in_quotes = False
        quote_char = None
        for i, char in enumerate(body_text):
            if char in ["'", '"', '`'] and (not in_quotes or char == quote_char):
                # Toggle quoting only on the same quote character that
                # opened the current quoted region.
                in_quotes = not in_quotes
                if in_quotes:
                    quote_char = char
                else:
                    quote_char = None
            elif char == '(' and not in_quotes:
                in_parentheses += 1
            elif char == ')' and not in_quotes:
                in_parentheses -= 1
            elif char == ',' and in_parentheses == 0 and not in_quotes:
                # Top-level comma: end of one column definition.
                field_defs.append(body_text[pos:i].strip())
                pos = i + 1
        # Append the final column definition after the last comma.
        if pos < len(body_text):
            field_defs.append(body_text[pos:].strip())
        logger.debug(f"解析出 {len(field_defs)} 个字段定义")
        # Process each column definition.
        for field_def in field_defs:
            # Skip table-level constraint clauses.
            if re.match(r'^\s*(?:PRIMARY|UNIQUE|FOREIGN|CHECK|CONSTRAINT)\s+', field_def, re.IGNORECASE):
                continue
            # Capture the column name (quoted or bare) and its type text,
            # stopping before DEFAULT / NOT NULL / REFERENCES clauses.
            field_pattern = r'^\s*(?:"([^"]+)"|\'([^\']+)\'|`([^`]+)`|([a-zA-Z0-9_]+))\s+(.+?)(?:\s+DEFAULT\s+|\s+NOT\s+NULL|\s+REFERENCES|\s*$)'
            field_match = re.search(field_pattern, field_def, re.IGNORECASE)
            if field_match:
                # Column name from whichever quoting alternative matched.
                field_name = field_match.group(1) or field_match.group(2) or field_match.group(3) or field_match.group(4)
                # Raw type text, possibly with a precision suffix.
                field_type = field_match.group(5).strip()
                # Keep only the first token of the type, then normalize it.
                type_base = re.split(r'\s+', field_type)[0]
                clean_type_value = clean_type(type_base)
                fields.append((field_name, clean_type_value))
                logger.debug(f"解析到字段: {field_name}, 类型: {clean_type_value}")
            else:
                logger.warning(f"无法解析字段定义: {field_def}")
        # Extract the table-level comment, if it targets this table.
        # NOTE(review): ``table_comment`` is collected but not included in
        # the returned structure.
        table_comment = ""
        table_comment_pattern = r"COMMENT\s+ON\s+TABLE\s+(?:['\"]?(\w+)['\"]?)\s+IS\s+'([^']+)'"
        table_comment_match = re.search(table_comment_pattern, sql, re.IGNORECASE)
        if table_comment_match:
            comment_table = table_comment_match.group(1)
            if comment_table.strip("'\"") == table_name.strip("'\""):
                table_comment = table_comment_match.group(2)
                logger.debug(f"找到表注释: {table_comment}")
        # Extract column-level comments keyed by column name.
        comments = {}
        column_comment_pattern = r"COMMENT\s+ON\s+COLUMN\s+['\"]?(\w+)['\"]?\.['\"]?(\w+)['\"]?\s+IS\s+'([^']+)'"
        for match in re.finditer(column_comment_pattern, sql, re.IGNORECASE):
            comment_table = match.group(1)
            column_name = match.group(2)
            comment = match.group(3)
            # Only keep comments addressed to this table.
            if comment_table.strip("'\"") == table_name.strip("'\""):
                comments[column_name] = comment
                logger.debug(f"找到列注释: {column_name} - {comment}")
            else:
                logger.debug(f"忽略列注释,表名不匹配: {comment_table} vs {table_name}")
        # Log how fields and comments line up, for debugging.
        logger.debug("========字段和注释匹配情况========")
        field_names = [f[0] for f in fields]
        logger.debug(f"字段列表 ({len(field_names)}): {field_names}")
        logger.debug(f"注释字段 ({len(comments)}): {list(comments.keys())}")
        # Build the metadata list; the Chinese name falls back to the
        # English column name when no comment was found.
        meta_list = []
        for field_name, field_type in fields:
            chinese_name = comments.get(field_name, "")
            meta_list.append({
                "name_en": field_name,
                "data_type": field_type,
                "name_zh": chinese_name if chinese_name else field_name
            })
        # Check whether the table already exists in the graph.
        try:
            status = status_query([table_name])
        except Exception as e:
            logger.error(f"检查表存在状态失败: {str(e)}")
            status = [False]
        # Assemble the final result keyed by table name.
        result = {
            table_name: {
                "exist": status[0] if status else False,
                "meta": meta_list
            }
        }
        logger.debug(f"解析结果: {json.dumps(result, ensure_ascii=False)}")
        return result
    except Exception as e:
        logger.error(f"解析表定义SQL失败: {str(e)}")
        logger.error(f"异常详情: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return None
  1040. # 判断英文表名是否在图谱中存在
  1041. def status_query(key_list):
  1042. query = """
  1043. unwind $Key_list as name
  1044. OPTIONAL MATCH (n:DataModel {name_en: name})
  1045. OPTIONAL MATCH (n:DataResource {name_en: name})
  1046. OPTIONAL MATCH (n:DataMetric {name_en: name})
  1047. WITH name, CASE
  1048. WHEN n IS NOT NULL THEN True
  1049. ELSE False
  1050. END AS exist
  1051. return collect(exist)AS exist
  1052. """
  1053. with neo4j_driver.get_session() as session:
  1054. result = session.run(query, Key_list=key_list)
  1055. data = result.value() # 获取单个值
  1056. return data
  1057. def select_sql(sql_query):
  1058. """解析SELECT查询语句"""
  1059. try:
  1060. # 提取SELECT子句
  1061. select_pattern = r'SELECT\s+(.*?)\s+FROM'
  1062. select_match = re.search(select_pattern, sql_query, re.IGNORECASE | re.DOTALL)
  1063. if not select_match:
  1064. return None
  1065. select_clause = select_match.group(1)
  1066. # 分割字段
  1067. fields = []
  1068. # 处理字段列表,避免在函数调用中的逗号导致错误分割
  1069. in_parenthesis = 0
  1070. current_field = ""
  1071. for char in select_clause:
  1072. if char == '(':
  1073. in_parenthesis += 1
  1074. current_field += char
  1075. elif char == ')':
  1076. in_parenthesis -= 1
  1077. current_field += char
  1078. elif char == ',' and in_parenthesis == 0:
  1079. fields.append(current_field.strip())
  1080. current_field = ""
  1081. else:
  1082. current_field += char
  1083. if current_field.strip():
  1084. fields.append(current_field.strip())
  1085. # 解析每个字段
  1086. parsed_fields = []
  1087. for field in fields:
  1088. # 检查是否有字段别名
  1089. alias_pattern = r'(.*?)\s+[aA][sS]\s+(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))$'
  1090. alias_match = re.search(alias_pattern, field)
  1091. if alias_match:
  1092. field_expr = alias_match.group(1).strip()
  1093. field_alias = next((g for g in alias_match.groups()[1:] if g is not None), "")
  1094. parsed_fields.append({
  1095. "expression": field_expr,
  1096. "alias": field_alias
  1097. })
  1098. else:
  1099. # 没有别名的情况
  1100. parsed_fields.append({
  1101. "expression": field.strip(),
  1102. "alias": None
  1103. })
  1104. # 提取FROM子句和表名
  1105. from_pattern = r'FROM\s+(.*?)(?:\s+WHERE|\s+GROUP|\s+HAVING|\s+ORDER|\s+LIMIT|$)'
  1106. from_match = re.search(from_pattern, sql_query, re.IGNORECASE | re.DOTALL)
  1107. tables = []
  1108. if from_match:
  1109. from_clause = from_match.group(1).strip()
  1110. # 分析FROM子句中的表
  1111. table_pattern = r'(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))(?:\s+(?:AS\s+)?(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))?'
  1112. for match in re.finditer(table_pattern, from_clause):
  1113. table_name = match.group(1) or match.group(2) or match.group(3) or match.group(4)
  1114. if table_name:
  1115. tables.append(table_name)
  1116. return tables
  1117. except Exception as e:
  1118. logger.error(f"解析SELECT查询语句失败: {str(e)}")
  1119. # logger.error(traceback.format_exc())
  1120. return None
  1121. def model_resource_list(page, page_size, name_filter=None):
  1122. """获取模型资源列表"""
  1123. try:
  1124. with neo4j_driver.get_session() as session:
  1125. # 构建查询条件
  1126. match_clause = "MATCH (n:model_resource)"
  1127. where_clause = ""
  1128. if name_filter:
  1129. where_clause = f" WHERE n.name_zh CONTAINS '{name_filter}'"
  1130. # 计算总数
  1131. count_cypher = f"{match_clause}{where_clause} RETURN count(n) as count"
  1132. count_result = session.run(count_cypher)
  1133. total_count = count_result.single()["count"]
  1134. # 分页查询
  1135. skip = (page - 1) * page_size
  1136. cypher = f"""
  1137. {match_clause}{where_clause}
  1138. RETURN n
  1139. ORDER BY n.create_time DESC
  1140. SKIP {skip} LIMIT {page_size}
  1141. """
  1142. result = session.run(cypher)
  1143. # 格式化结果
  1144. resources = []
  1145. for record in result:
  1146. node = serialize_node_properties(record["n"])
  1147. node["id"] = record["n"].id
  1148. resources.append(node)
  1149. return resources, total_count
  1150. except Exception as e:
  1151. logger.error(f"获取模型资源列表失败: {str(e)}")
  1152. return [], 0
def data_resource_edit(data):
    """Edit a DataResource node and rebuild its label/metadata relationships.

    Updates the node's scalar properties, replaces its LABEL relationship
    when a ``tag`` is supplied, and rebuilds INCLUDES / master /
    clean_resource relationships from ``parsed_data``.  Old INCLUDES and
    clean_resource relationships are always deleted, even when
    ``parsed_data`` is empty.

    Args:
        data: dict with at least ``id``; optional ``tag`` (DataLabel id),
            ``parsed_data`` (list of metadata dicts) and arbitrary scalar
            properties to write onto the node.

    Returns:
        The updated node's properties as a dict (with ``id`` added).

    Raises:
        ValueError: when ``id`` is missing or the node does not exist.
        Exception: any underlying database error is logged and re-raised.
    """
    try:
        resource_id = data.get("id")
        if not resource_id:
            raise ValueError("缺少资源ID")
        with neo4j_driver.get_session() as session:
            # Collect scalar properties to update; ``id`` and
            # ``parsed_data`` are handled separately, None values skipped.
            update_fields = {}
            for key, value in data.items():
                if key != "id" and key != "parsed_data" and value is not None:
                    update_fields[key] = value
            # Trace whether the describe field is part of this update.
            if "describe" in data:
                logger.info(f"编辑资源,describe字段将被更新为: {data.get('describe')}")
            else:
                logger.info("编辑资源,describe字段不在更新数据中")
            # Refresh the timestamp.
            # NOTE(review): this overwrites ``create_time`` on every edit —
            # a dedicated update-time field may have been intended; confirm.
            update_fields["create_time"] = get_formatted_time()
            # Build the SET clause; update_fields is guaranteed non-empty
            # here because create_time was just added.
            if update_fields:
                set_clause = ", ".join([f"n.{k} = ${k}" for k in update_fields.keys()])
                cypher = f"""
                MATCH (n:DataResource)
                WHERE id(n) = $resource_id
                SET {set_clause}
                RETURN n
                """
                result = session.run(cypher, resource_id=int(resource_id), **update_fields)
            else:
                # Nothing to update: just fetch the node.
                cypher = """
                MATCH (n:DataResource)
                WHERE id(n) = $resource_id
                RETURN n
                """
                result = session.run(cypher, resource_id=int(resource_id))
            updated_node = result.single()
            if not updated_node:
                raise ValueError("资源不存在")
            # Trace the node state after the update.
            logger.info(f"更新后的节点数据,describe字段: {updated_node['n'].get('describe')}")
            # Replace the LABEL relationship when a tag id is supplied.
            tag_id = data.get("tag")
            if tag_id:
                # Drop any existing LABEL relationship first.
                delete_rel_cypher = """
                MATCH (n:DataResource)-[r:LABEL]->()
                WHERE id(n) = $resource_id
                DELETE r
                """
                session.run(delete_rel_cypher, resource_id=int(resource_id))
                # Create the new LABEL relationship.
                create_rel_cypher = """
                MATCH (n:DataResource), (t:DataLabel)
                WHERE id(n) = $resource_id AND id(t) = $tag_id
                CREATE (n)-[r:LABEL]->(t)
                RETURN r
                """
                session.run(create_rel_cypher, resource_id=int(resource_id), tag_id=int(tag_id))
            # Metadata relationships.
            parsed_data = data.get("parsed_data", [])
            # Always delete old INCLUDES and clean_resource relationships,
            # regardless of whether parsed_data is empty.
            delete_meta_cypher = """
            MATCH (n:DataResource)-[r:INCLUDES]->()
            WHERE id(n) = $resource_id
            DELETE r
            """
            session.run(delete_meta_cypher, resource_id=int(resource_id))
            delete_clean_cypher = """
            MATCH (n:DataResource)-[r:clean_resource]->()
            WHERE id(n) = $resource_id
            DELETE r
            """
            session.run(delete_clean_cypher, resource_id=int(resource_id))
            # Only rebuild relationships when parsed_data is non-empty.
            if parsed_data:
                # Pre-pass: make sure every metadata entry carries a valid
                # DataMeta node id, creating nodes as needed.
                for meta in parsed_data:
                    meta_id = meta.get("id")
                    meta_name = meta.get("name_zh")
                    if not meta_id and meta_name:
                        # No id but a Chinese name: look for an existing
                        # DataMeta node with that name first.
                        find_meta_cypher = """
                        MATCH (m:DataMeta {name_zh: $meta_name})
                        RETURN m
                        """
                        find_result = session.run(find_meta_cypher, meta_name=meta_name)
                        existing_meta = find_result.single()
                        if existing_meta:
                            # Reuse the existing node's id.
                            meta_id = existing_meta["m"].id
                            meta["id"] = meta_id
                            logger.info(f"找到现有的DataMeta节点: {meta_name}, ID: {meta_id}")
                        else:
                            # Not found: create a new DataMeta node.
                            create_meta_cypher = """
                            CREATE (m:DataMeta {
                                name_zh: $name_zh,
                                name_en: $name_en,
                                data_type: $data_type,
                                create_time: $create_time
                            })
                            RETURN m
                            """
                            create_time = get_formatted_time()
                            new_meta_result = session.run(
                                create_meta_cypher,
                                name_zh=meta_name,
                                name_en=meta.get("name_en", meta_name),
                                data_type=meta.get("data_type", "varchar(255)"),
                                create_time=create_time
                            )
                            new_meta = new_meta_result.single()
                            if new_meta:
                                meta_id = new_meta["m"].id
                                meta["id"] = meta_id
                                logger.info(f"创建新的DataMeta节点: {meta_name}, ID: {meta_id}")
                            else:
                                logger.error(f"创建DataMeta节点失败: {meta_name}")
                                continue
                    elif not meta_id:
                        # Neither id nor name: nothing to link against.
                        logger.warning(f"跳过没有ID和name的metadata: {meta}")
                        continue
                # Second pass: create the INCLUDES / master / standard
                # relationships for every entry that has an id.
                for meta in parsed_data:
                    meta_id = meta.get("id")
                    if meta_id:
                        # Resource INCLUDES metadata.
                        create_meta_cypher = """
                        MATCH (n:DataResource), (m:DataMeta)
                        WHERE id(n) = $resource_id AND id(m) = $meta_id
                        CREATE (n)-[r:INCLUDES]->(m)
                        RETURN r
                        """
                        session.run(create_meta_cypher, resource_id=int(resource_id), meta_id=int(meta_id))
                        # Optional master-data relationship.
                        master_data = meta.get("master_data")
                        if master_data:
                            # Link the master node to the metadata node.
                            create_master_cypher = """
                            MATCH (master), (meta:DataMeta)
                            WHERE id(master) = $master_id AND id(meta) = $meta_id
                            MERGE (master)-[r:master]->(meta)
                            RETURN r
                            """
                            session.run(create_master_cypher, master_id=int(master_data), meta_id=int(meta_id))
                        # Optional data-standard relationships.
                        data_standard = meta.get("data_standard")
                        if data_standard and isinstance(data_standard, dict) and data_standard.get("id"):
                            standard_id = data_standard.get("id")
                            # Standard -> metadata relationship.
                            create_standard_meta_cypher = """
                            MATCH (standard), (meta:DataMeta)
                            WHERE id(standard) = $standard_id AND id(meta) = $meta_id
                            MERGE (standard)-[r:clean_resource]->(meta)
                            RETURN r
                            """
                            session.run(create_standard_meta_cypher, standard_id=int(standard_id), meta_id=int(meta_id))
                            # Resource -> standard relationship.
                            create_resource_standard_cypher = """
                            MATCH (resource:DataResource), (standard)
                            WHERE id(resource) = $resource_id AND id(standard) = $standard_id
                            MERGE (resource)-[r:clean_resource]->(standard)
                            RETURN r
                            """
                            session.run(create_resource_standard_cypher, resource_id=int(resource_id), standard_id=int(standard_id))
            else:
                logger.info(f"parsed_data为空,只删除旧的元数据关系,不创建新的关系")
            # Serialize and return the updated node.
            node_data = serialize_node_properties(updated_node["n"])
            node_data["id"] = updated_node["n"].id
            # Trace the describe field of the returned payload.
            logger.info(f"data_resource_edit返回数据,describe字段: {node_data.get('describe')}")
            return node_data
    except Exception as e:
        logger.error(f"编辑数据资源失败: {str(e)}")
        raise
  1331. def handle_data_source(data_source):
  1332. """处理数据源信息,创建或获取数据源节点"""
  1333. try:
  1334. with neo4j_driver.get_session() as session:
  1335. # 获取英文名称作为唯一标识
  1336. ds_name_en = data_source.get("name_en")
  1337. if not ds_name_en:
  1338. logger.error("数据源缺少必要的name_en属性")
  1339. return None
  1340. # 如果没有设置name_zh,使用name_en作为name_zh
  1341. if "name_zh" not in data_source or not data_source["name_zh"]:
  1342. data_source["name_zh"] = ds_name_en
  1343. # 检查必填字段
  1344. required_fields = ["type", "host", "port", "database", "username"]
  1345. has_required_fields = all(data_source.get(field) for field in required_fields)
  1346. # 查询是否已存在相同name_en的数据源
  1347. existing_cypher = """
  1348. MATCH (ds:DataSource {name_en: $name_en})
  1349. RETURN ds
  1350. """
  1351. existing_result = session.run(existing_cypher, name_en=ds_name_en)
  1352. existing_record = existing_result.single()
  1353. if existing_record:
  1354. existing_data_source = serialize_node_properties(existing_record["ds"])
  1355. logger.info(f"根据名称找到现有数据源: {existing_data_source.get('name_en')}")
  1356. return existing_data_source.get("name_en")
  1357. else:
  1358. # 数据源不存在,抛出异常
  1359. raise ValueError(f"未找到名称为 {ds_name_en} 的数据源,请先创建该数据源或提供完整的数据源信息")
  1360. except Exception as e:
  1361. logger.error(f"处理数据源失败: {str(e)}")
  1362. raise RuntimeError(f"处理数据源失败: {str(e)}")