resource.py

import json
import re
import logging
from py2neo import Relationship
import pandas as pd
from app.services.neo4j_driver import neo4j_driver
from app.services.package_function import create_or_get_node, relationship_exists, get_node

logger = logging.getLogger("app")


def get_formatted_time():
    """Return the current time as a formatted string."""
    import time
    return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())

def get_node_by_id(label, id):
    """Get a node with the given label by its ID."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure the id is an integer
            try:
                id_int = int(id)
            except (ValueError, TypeError):
                logger.error(f"Node ID is not a valid integer: {id}")
                return None

            cypher = f"MATCH (n:{label}) WHERE id(n) = $id RETURN n"
            result = session.run(cypher, id=id_int)
            record = result.single()
            return record["n"] if record else None
    except Exception as e:
        logger.error(f"Failed to get node by ID: {str(e)}")
        return None

def get_node_by_id_no_label(id):
    """Get a node by its ID without restricting the label."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure the id is an integer
            try:
                id_int = int(id)
            except (ValueError, TypeError):
                logger.error(f"Node ID is not a valid integer: {id}")
                return None

            cypher = "MATCH (n) WHERE id(n) = $id RETURN n"
            result = session.run(cypher, id=id_int)
            record = result.single()
            return record["n"] if record else None
    except Exception as e:
        logger.error(f"Failed to get node by ID: {str(e)}")
        return None

def delete_relationships(start_node, rel_type=None, end_node=None):
    """Delete relationships starting from the given node."""
    try:
        with neo4j_driver.get_session() as session:
            if rel_type and end_node:
                cypher = "MATCH (a)-[r:`{rel_type}`]->(b) WHERE id(a) = $start_id AND id(b) = $end_id DELETE r"
                cypher = cypher.replace("{rel_type}", rel_type)
                session.run(cypher, start_id=start_node.id, end_id=end_node.id)
            elif rel_type:
                cypher = "MATCH (a)-[r:`{rel_type}`]->() WHERE id(a) = $start_id DELETE r"
                cypher = cypher.replace("{rel_type}", rel_type)
                session.run(cypher, start_id=start_node.id)
            else:
                cypher = "MATCH (a)-[r]->() WHERE id(a) = $start_id DELETE r"
                session.run(cypher, start_id=start_node.id)
            return True
    except Exception as e:
        logger.error(f"Failed to delete relationships: {str(e)}")
        return False

def update_or_create_node(label, **properties):
    """Update an existing node or create a new one."""
    try:
        with neo4j_driver.get_session() as session:
            node_id = properties.pop('id', None)
            if node_id:
                # Update the existing node
                set_clause = ", ".join([f"n.{k} = ${k}" for k in properties.keys()])
                cypher = f"MATCH (n:{label}) WHERE id(n) = $id SET {set_clause} RETURN n"
                result = session.run(cypher, id=int(node_id), **properties)
            else:
                # Create a new node
                props_str = ", ".join([f"{k}: ${k}" for k in properties.keys()])
                cypher = f"CREATE (n:{label} {{{props_str}}}) RETURN n"
                result = session.run(cypher, **properties)
            record = result.single()
            return record["n"] if record else None
    except Exception as e:
        logger.error(f"Failed to update or create node: {str(e)}")
        return None
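
# Illustrative call (a sketch added for documentation; the label and property names below are
# hypothetical, and a reachable Neo4j instance behind neo4j_driver is assumed):
#   node = update_or_create_node("data_label", name="finance", describe="Finance domain")
#   node = update_or_create_node("data_label", id=node.id, describe="Finance & tax domain")
# Passing `id` switches the statement from CREATE to MATCH ... SET on the existing node.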

# Create and query data_resource / meta_data nodes and their relationships
def handle_node(receiver, head_data, data_resource, data_source=None):
    """Create the data resource node and build its relationships."""
    try:
        # Properties to update
        update_attributes = {
            'en_name': data_resource['en_name'],
            'time': get_formatted_time(),
            'type': 'structure'  # structured files carry no explicit type
        }
        if 'additional_info' in receiver:
            del receiver['additional_info']
        # Remove data_source from receiver so a complex object is not stored as a node property
        if 'data_source' in receiver:
            del receiver['data_source']
        tag_list = receiver.get('tag')
        receiver.update(update_attributes)

        # Create or fetch the data_resource node
        with neo4j_driver.get_session() as session:
            props_str = ", ".join([f"{k}: ${k}" for k in receiver.keys()])
            cypher = f"""
            MERGE (n:data_resource {{name: $name}})
            ON CREATE SET n = {{{props_str}}}
            ON MATCH SET {", ".join([f"n.{k} = ${k}" for k in receiver.keys()])}
            RETURN n
            """
            result = session.run(cypher, **receiver)
            data_resource_node = result.single()["n"]
            resource_id = data_resource_node.id  # numeric node ID

            # Handle the tag relationship
            if tag_list:
                tag_node = get_node_by_id('data_label', tag_list)
                if tag_node:
                    # Check whether the relationship already exists
                    rel_check = """
                    MATCH (a:data_resource)-[r:label]->(b:data_label)
                    WHERE id(a) = $resource_id AND id(b) = $tag_id
                    RETURN r
                    """
                    rel_result = session.run(rel_check, resource_id=resource_id, tag_id=tag_node.id)
                    # Create the relationship only if it does not exist yet
                    if not rel_result.single():
                        rel_create = """
                        MATCH (a:data_resource), (b:data_label)
                        WHERE id(a) = $resource_id AND id(b) = $tag_id
                        CREATE (a)-[r:label]->(b)
                        RETURN r
                        """
                        session.run(rel_create, resource_id=resource_id, tag_id=tag_node.id)

            # Handle the header data (metadata / fields)
            if head_data:
                for item in head_data:
                    # Create the metadata node
                    meta_cypher = """
                    MERGE (m:meta_data {name: $name})
                    ON CREATE SET m.en_name = $en_name,
                                  m.create_time = $create_time,
                                  m.data_type = $type,
                                  m.status = 'true'
                    ON MATCH SET m.data_type = $type,
                                 m.status = 'true'
                    RETURN m
                    """
                    create_time = get_formatted_time()
                    meta_result = session.run(
                        meta_cypher,
                        name=item['name'],
                        en_name=item['en_name'],
                        create_time=create_time,
                        type=item['data_type']  # use data_type as the data_type property
                    )
                    meta_record = meta_result.single()
                    if meta_record and meta_record["m"]:
                        meta_node = meta_record["m"]
                        meta_id = meta_node.id  # numeric node ID
                        # Log that the node was created or fetched, and its ID
                        logger.info(f"Created or fetched metadata node: ID={meta_id}, name={item['name']}")

                        # Confirm the data resource node can still be found
                        check_resource_cypher = """
                        MATCH (n:data_resource)
                        WHERE id(n) = $resource_id
                        RETURN n
                        """
                        check_resource = session.run(check_resource_cypher, resource_id=resource_id)
                        if check_resource.single():
                            logger.info(f"Found data resource node: ID={resource_id}")
                        else:
                            logger.error(f"Could not find data resource node: ID={resource_id}")
                            continue

                        # Create the relationship
                        rel_cypher = """
                        MATCH (a:data_resource), (m:meta_data)
                        WHERE id(a) = $resource_id AND id(m) = $meta_id
                        MERGE (a)-[r:contain]->(m)
                        RETURN r
                        """
                        rel_result = session.run(
                            rel_cypher,
                            resource_id=resource_id,
                            meta_id=meta_id
                        )
                        rel_record = rel_result.single()
                        if rel_record:
                            logger.info(f"Created data resource -> metadata relationship: {resource_id} -> {meta_id}")
                        else:
                            logger.warning(f"Failed to create data resource -> metadata relationship: {resource_id} -> {meta_id}")
                    else:
                        logger.error(f"Could not create or fetch metadata node: {item['name']}")

            # Handle the data source relationship
            if data_source:
                try:
                    # Create or fetch the data source node
                    data_source_en_name = handle_data_source(data_source)
                    # Create the relationship between the data resource and the data source
                    if data_source_en_name:
                        # Create the isbelongto relationship
                        rel_data_source_cypher = """
                        MATCH (a:data_resource), (b:data_source)
                        WHERE id(a) = $resource_id AND b.en_name = $ds_en_name
                        MERGE (a)-[r:isbelongto]->(b)
                        RETURN r
                        """
                        session.run(
                            rel_data_source_cypher,
                            resource_id=resource_id,
                            ds_en_name=data_source_en_name
                        )
                        logger.info(f"Created data resource -> data source relationship: {resource_id} -> {data_source_en_name}")
                except Exception as e:
                    logger.error(f"Failed to handle data source relationship: {str(e)}")
                    raise RuntimeError(f"Failed to handle data source relationship: {str(e)}")

            return resource_id
    except Exception as e:
        logger.error(f"Failed to create data resource node and build relationships: {str(e)}")
        raise

def handle_id_resource(resource_id):
    """Query a single data resource by ID."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure resource_id is an integer
            try:
                resource_id_int = int(resource_id)
            except (ValueError, TypeError):
                logger.error(f"Resource ID is not a valid integer: {resource_id}")
                return None

            # Query by numeric ID
            cypher = """
            MATCH (n:data_resource)
            WHERE id(n) = $resource_id
            RETURN n
            """
            result = session.run(cypher, resource_id=resource_id_int)
            record = result.single()
            if not record:
                logger.error(f"Resource not found, ID: {resource_id_int}")
                return None

            # Build the response payload
            data_resource = dict(record["n"])
            data_resource["id"] = record["n"].id

            # Query the associated tag
            tag_cypher = """
            MATCH (n:data_resource)-[:label]->(t:data_label)
            WHERE id(n) = $resource_id
            RETURN t
            """
            tag_result = session.run(tag_cypher, resource_id=resource_id_int)
            tag_record = tag_result.single()
            if tag_record:
                tag = dict(tag_record["t"])
                tag["id"] = tag_record["t"].id
                data_resource["tag_info"] = tag

            # Query the associated metadata - both meta_data and Metadata labels are supported
            meta_cypher = """
            MATCH (n:data_resource)-[:contain]->(m)
            WHERE id(n) = $resource_id
            AND (m:meta_data OR m:Metadata)
            RETURN m
            """
            meta_result = session.run(meta_cypher, resource_id=resource_id_int)
            meta_list = []
            for meta_record in meta_result:
                meta = dict(meta_record["m"])
                meta["id"] = meta_record["m"].id
                meta_list.append(meta)
            data_resource["meta_list"] = meta_list

            logger.info(f"Successfully fetched resource detail, ID: {resource_id_int}")
            return data_resource
    except Exception as e:
        logger.error(f"Failed to query single data resource: {str(e)}")
        return None

def id_resource_graph(resource_id):
    """Get the graph around a data resource."""
    try:
        with neo4j_driver.get_session() as session:
            # Query the data resource node and its relationships
            cypher = """
            MATCH (n:data_resource)-[r]-(m)
            WHERE id(n) = $resource_id
            RETURN n, r, m
            """
            result = session.run(cypher, resource_id=int(resource_id))

            # Collect nodes and relationships
            nodes = {}
            relationships = []
            for record in result:
                # Source node
                source_node = dict(record["n"])
                source_node["id"] = record["n"].id
                nodes[source_node["id"]] = source_node
                # Target node
                target_node = dict(record["m"])
                target_node["id"] = record["m"].id
                nodes[target_node["id"]] = target_node
                # Relationship
                rel = record["r"]
                relationship = {
                    "id": rel.id,
                    "source": record["n"].id,
                    "target": record["m"].id,
                    "type": rel.type
                }
                relationships.append(relationship)

            return {
                "nodes": list(nodes.values()),
                "relationships": relationships
            }
    except Exception as e:
        logger.error(f"Failed to get data resource graph: {str(e)}")
        return {"nodes": [], "relationships": []}

def resource_list(page, page_size, en_name_filter=None, name_filter=None,
                  type_filter='all', category_filter=None, tag_filter=None):
    """Get the paginated data resource list."""
    try:
        with neo4j_driver.get_session() as session:
            # Build the query conditions
            match_clause = "MATCH (n:data_resource)"
            where_conditions = []
            if en_name_filter:
                where_conditions.append(f"n.en_name CONTAINS '{en_name_filter}'")
            if name_filter:
                where_conditions.append(f"n.name CONTAINS '{name_filter}'")
            if type_filter and type_filter != 'all':
                where_conditions.append(f"n.type = '{type_filter}'")
            if category_filter:
                where_conditions.append(f"n.category = '{category_filter}'")
            # Tag filtering needs an extra match
            if tag_filter:
                match_clause += "-[:label]->(t:data_label)"
                where_conditions.append(f"t.name = '{tag_filter}'")
            where_clause = " WHERE " + " AND ".join(where_conditions) if where_conditions else ""

            # Count total
            count_cypher = f"{match_clause}{where_clause} RETURN count(n) as count"
            count_result = session.run(count_cypher)
            total_count = count_result.single()["count"]

            # Paginated query
            skip = (page - 1) * page_size
            cypher = f"""
            {match_clause}{where_clause}
            RETURN n
            ORDER BY n.time DESC
            SKIP {skip} LIMIT {page_size}
            """
            result = session.run(cypher)

            # Format the results
            resources = []
            for record in result:
                node = dict(record["n"])
                node["id"] = record["n"].id

                # Query the associated tag
                tag_cypher = """
                MATCH (n:data_resource)-[:label]->(t:data_label)
                WHERE id(n) = $resource_id
                RETURN t
                """
                tag_result = session.run(tag_cypher, resource_id=node["id"])
                tag_record = tag_result.single()
                if tag_record:
                    tag = dict(tag_record["t"])
                    tag["id"] = tag_record["t"].id
                    node["tag_info"] = tag

                resources.append(node)

            return resources, total_count
    except Exception as e:
        logger.error(f"Failed to get data resource list: {str(e)}")
        return [], 0

def id_data_search_list(resource_id, page, page_size, en_name_filter=None,
                        name_filter=None, category_filter=None, tag_filter=None):
    """Get the metadata list associated with a specific data resource."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure resource_id is an integer
            try:
                resource_id_int = int(resource_id)
            except (ValueError, TypeError):
                logger.error(f"Resource ID is not a valid integer: {resource_id}")
                return [], 0

            # Base match - both meta_data and Metadata labels are supported
            match_clause = """
            MATCH (n:data_resource)-[:contain]->(m)
            WHERE id(n) = $resource_id
            AND (m:meta_data OR m:Metadata)
            """
            where_conditions = []
            if en_name_filter:
                where_conditions.append(f"m.en_name CONTAINS '{en_name_filter}'")
            if name_filter:
                where_conditions.append(f"m.name CONTAINS '{name_filter}'")
            if category_filter:
                where_conditions.append(f"m.category = '{category_filter}'")
            # Tag filtering needs an extra match
            tag_match = ""
            if tag_filter:
                tag_match = "MATCH (m)-[:HAS_TAG]->(t:Tag) WHERE t.name = $tag_filter"
            where_clause = " AND " + " AND ".join(where_conditions) if where_conditions else ""

            # Count total
            count_cypher = f"""
            {match_clause}{where_clause}
            {tag_match}
            RETURN count(m) as count
            """
            count_params = {"resource_id": resource_id_int}
            if tag_filter:
                count_params["tag_filter"] = tag_filter
            count_result = session.run(count_cypher, **count_params)
            total_count = count_result.single()["count"]

            # Paginated query
            skip = (page - 1) * page_size
            cypher = f"""
            {match_clause}{where_clause}
            {tag_match}
            RETURN m
            ORDER BY m.name
            SKIP {skip} LIMIT {page_size}
            """
            result = session.run(cypher, **count_params)

            # Format the results
            metadata_list = []
            for record in result:
                meta = dict(record["m"])
                meta["id"] = record["m"].id
                metadata_list.append(meta)

            logger.info(f"Successfully fetched metadata for resource, ID: {resource_id_int}, metadata count: {total_count}")
            return metadata_list, total_count
    except Exception as e:
        logger.error(f"Failed to get metadata list for data resource: {str(e)}")
        return [], 0

def resource_kinship_graph(resource_id, include_meta=True):
    """Get the kinship graph of a data resource."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure resource_id is an integer
            try:
                resource_id_int = int(resource_id)
            except (ValueError, TypeError):
                logger.error(f"Resource ID is not a valid integer: {resource_id}")
                return {"nodes": [], "relationships": []}

            # Base query
            cypher_parts = [
                "MATCH (n:data_resource) WHERE id(n) = $resource_id",
                "OPTIONAL MATCH (n)-[:label]->(l:data_label)",
            ]
            # Optionally include metadata - both meta_data and Metadata labels are supported
            if include_meta:
                cypher_parts.append("OPTIONAL MATCH (n)-[:contain]->(m) WHERE (m:meta_data OR m:Metadata)")
                cypher_parts.append("RETURN n, l, collect(m) as metadata")
            else:
                # Without the metadata match, `m` is undefined, so return an empty list instead
                cypher_parts.append("RETURN n, l, [] as metadata")
            cypher = "\n".join(cypher_parts)

            result = session.run(cypher, resource_id=resource_id_int)
            record = result.single()
            if not record:
                logger.error(f"No graph data found for resource, ID: {resource_id_int}")
                return {"nodes": [], "relationships": []}

            # Collect nodes and relationships
            nodes = {}
            relationships = []

            # Data resource node
            resource_node = dict(record["n"])
            resource_node["id"] = record["n"].id
            resource_node["labels"] = list(record["n"].labels)
            nodes[resource_node["id"]] = resource_node

            # Label node
            if record["l"]:
                label_node = dict(record["l"])
                label_node["id"] = record["l"].id
                label_node["labels"] = list(record["l"].labels)
                nodes[label_node["id"]] = label_node
                # Resource -> label relationship
                relationships.append({
                    "id": f"rel-{resource_node['id']}-label-{label_node['id']}",
                    "source": resource_node["id"],
                    "target": label_node["id"],
                    "type": "label"
                })

            # Metadata nodes
            if include_meta and record["metadata"]:
                for meta in record["metadata"]:
                    if meta:  # make sure the metadata node exists
                        meta_node = dict(meta)
                        meta_node["id"] = meta.id
                        meta_node["labels"] = list(meta.labels)
                        nodes[meta_node["id"]] = meta_node
                        # Resource -> metadata relationship
                        relationships.append({
                            "id": f"rel-{resource_node['id']}-contain-{meta_node['id']}",
                            "source": resource_node["id"],
                            "target": meta_node["id"],
                            "type": "contain"
                        })

            logger.info(f"Successfully fetched resource graph, ID: {resource_id_int}, node count: {len(nodes)}")
            return {
                "nodes": list(nodes.values()),
                "relationships": relationships
            }
    except Exception as e:
        logger.error(f"Failed to get data resource kinship graph: {str(e)}")
        return {"nodes": [], "relationships": []}

def resource_impact_all_graph(resource_id, include_meta=True):
    """Get the impact graph of a data resource."""
    try:
        with neo4j_driver.get_session() as session:
            # Make sure resource_id is an integer
            try:
                resource_id_int = int(resource_id)
            except (ValueError, TypeError):
                logger.error(f"Resource ID is not a valid integer: {resource_id}")
                return {"nodes": [], "relationships": []}

            # The include_meta flag controls the traversal depth
            if include_meta:
                cypher = """
                MATCH path = (n:data_resource)-[*1..3]-(m)
                WHERE id(n) = $resource_id
                RETURN path
                """
            else:
                cypher = """
                MATCH path = (n:data_resource)-[*1..2]-(m)
                WHERE id(n) = $resource_id
                AND NOT (m:meta_data) AND NOT (m:Metadata)
                RETURN path
                """
            result = session.run(cypher, resource_id=resource_id_int)

            # Collect nodes and relationships
            nodes = {}
            relationships = {}
            for record in result:
                path = record["path"]
                # All nodes on the path
                for node in path.nodes:
                    if node.id not in nodes:
                        node_dict = dict(node)
                        node_dict["id"] = node.id
                        node_dict["labels"] = list(node.labels)
                        nodes[node.id] = node_dict
                # All relationships on the path
                for rel in path.relationships:
                    if rel.id not in relationships:
                        rel_dict = {
                            "id": rel.id,
                            "source": rel.start_node.id,
                            "target": rel.end_node.id,
                            "type": rel.type
                        }
                        relationships[rel.id] = rel_dict

            logger.info(f"Successfully fetched full graph, ID: {resource_id_int}, node count: {len(nodes)}")
            return {
                "nodes": list(nodes.values()),
                "relationships": list(relationships.values())
            }
    except Exception as e:
        logger.error(f"Failed to get data resource impact graph: {str(e)}")
        return {"nodes": [], "relationships": []}

def clean_type(type_str):
    """Normalize an SQL type string."""
    # Extract the base type without any length or precision information
    basic_type = re.sub(r'\(.*?\)', '', type_str).strip().upper()
    # Remove a trailing VARYING suffix
    basic_type = re.sub(r'\s+VARYING$', '', basic_type)

    # Standardize common types
    type_mapping = {
        'INT': 'INTEGER',
        'INT4': 'INTEGER',
        'INT8': 'BIGINT',
        'SMALLINT': 'SMALLINT',
        'BIGINT': 'BIGINT',
        'FLOAT4': 'FLOAT',
        'FLOAT8': 'DOUBLE',
        'REAL': 'FLOAT',
        'DOUBLE PRECISION': 'DOUBLE',
        'NUMERIC': 'DECIMAL',
        'BOOL': 'BOOLEAN',
        'CHARACTER': 'CHAR',
        'CHAR VARYING': 'VARCHAR',
        'CHARACTER VARYING': 'VARCHAR',
        'TEXT': 'TEXT',
        'DATE': 'DATE',
        'TIME': 'TIME',
        'TIMESTAMP': 'TIMESTAMP',
        'TIMESTAMPTZ': 'TIMESTAMP WITH TIME ZONE',
        'BYTEA': 'BINARY',
        'JSON': 'JSON',
        'JSONB': 'JSONB',
        'UUID': 'UUID',
        'SERIAL': 'SERIAL',
        'SERIAL4': 'SERIAL',
        'SERIAL8': 'BIGSERIAL',
        'BIGSERIAL': 'BIGSERIAL'
    }

    # Look up the standardized type, falling back to the cleaned input
    return type_mapping.get(basic_type, basic_type)
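
# Behaviour of clean_type on a few representative inputs (comment-only examples, added for
# documentation and checked against the mapping above):
#   clean_type("varchar(255)")   -> "VARCHAR"
#   clean_type("int4")           -> "INTEGER"
#   clean_type("numeric(10,2)")  -> "DECIMAL"
# Note that the VARYING suffix is stripped before the lookup, so "character varying(100)"
# resolves through "CHARACTER" to "CHAR".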

def clean_field_name(field_name):
    """Strip backticks and quotes from a field name."""
    return field_name.strip('`').strip('"').strip("'")

def select_create_ddl(sql_content):
    """Extract CREATE TABLE DDL statements from SQL content."""
    try:
        # Parse a potentially complex SQL file and identify every CREATE TABLE statement
        # together with the COMMENT statements that belong to it.
        # First, split the SQL content on semicolons (outside of string literals).
        statements = []
        current_statement = ""
        in_string = False
        string_quote = None
        for char in sql_content:
            if char in ["'", '"']:
                if not in_string:
                    in_string = True
                    string_quote = char
                elif char == string_quote:
                    in_string = False
                    string_quote = None
                current_statement += char
            elif char == ';' and not in_string:
                current_statement += char
                if current_statement.strip():
                    statements.append(current_statement.strip())
                current_statement = ""
            else:
                current_statement += char
        if current_statement.strip():
            statements.append(current_statement.strip())

        # Find all CREATE TABLE statements and their associated comments
        create_table_statements = []
        create_index = -1
        in_table_block = False
        current_table = None
        current_block = ""

        for i, stmt in enumerate(statements):
            if re.search(r'^\s*CREATE\s+TABLE', stmt, re.IGNORECASE):
                # If a table block is already open, save it first
                if in_table_block and current_block:
                    create_table_statements.append(current_block)
                # Start a new table block
                in_table_block = True
                current_block = stmt
                # Extract the table name
                table_match = re.search(r'CREATE\s+TABLE\s+(?:(?:"[^"]+"|\'[^\']+\'|[^"\'\s\.]+)\.)?(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\(]+))', stmt, re.IGNORECASE)
                if table_match:
                    current_table = table_match.group(1) or table_match.group(2) or table_match.group(3)
                    current_table = current_table.strip('"\'') if current_table else ""
            elif in_table_block and (re.search(r'COMMENT\s+ON\s+TABLE', stmt, re.IGNORECASE) or
                                     re.search(r'COMMENT\s+ON\s+COLUMN', stmt, re.IGNORECASE)):
                # Check whether the comment belongs to the current table
                if current_table:
                    # Table comment
                    if re.search(r'COMMENT\s+ON\s+TABLE', stmt, re.IGNORECASE):
                        table_comment_match = re.search(r'COMMENT\s+ON\s+TABLE\s+[\'"]?(\w+)[\'"]?', stmt, re.IGNORECASE)
                        if table_comment_match:
                            comment_table = table_comment_match.group(1).strip('"\'')
                            if comment_table == current_table:
                                current_block += " " + stmt
                            else:
                                # Comment for another table: the current table's DDL ends here
                                create_table_statements.append(current_block)
                                in_table_block = False
                                current_block = ""
                                current_table = None
                    # Column comment
                    elif re.search(r'COMMENT\s+ON\s+COLUMN', stmt, re.IGNORECASE):
                        column_comment_match = re.search(
                            r'COMMENT\s+ON\s+COLUMN\s+[\'"]?(\w+)[\'"]?\.[\'"]?(\w+)[\'"]?\s+IS\s+\'([^\']+)\'',
                            stmt,
                            re.IGNORECASE
                        )
                        if column_comment_match:
                            comment_table = column_comment_match.group(1)
                            if comment_table == current_table:
                                current_block += " " + stmt
                            else:
                                # Comment for another table: the current table's DDL ends here
                                create_table_statements.append(current_block)
                                in_table_block = False
                                current_block = ""
                                current_table = None
            elif in_table_block and re.search(r'^\s*CREATE\s+', stmt, re.IGNORECASE):
                # Any other CREATE statement (not a comment) closes the current block
                create_table_statements.append(current_block)
                in_table_block = False
                current_block = ""
                current_table = None

        # Add the last open block
        if in_table_block and current_block:
            create_table_statements.append(current_block)

        # Logging
        logger.debug(f"Extracted {len(create_table_statements)} DDL statements")
        for i, stmt in enumerate(create_table_statements):
            logger.debug(f"DDL statement {i+1}: {stmt}")

        return create_table_statements
    except Exception as e:
        logger.error(f"Failed to extract DDL statements: {str(e)}")
        # logger.error(traceback.format_exc())
        return []
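
# Example of the grouping behaviour (comment-only sketch added for documentation): for a script
# such as
#   CREATE TABLE users (id integer); COMMENT ON TABLE users IS 'User table';
# the function returns a single block that bundles the CREATE TABLE statement with the COMMENT
# statements referring to the same table.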

def table_sql(sql):
    """Parse a table definition SQL statement; both schema-qualified and bare table names are supported."""
    try:
        # Supported formats:
        # 1. CREATE TABLE tablename
        # 2. CREATE TABLE "tablename"
        # 3. CREATE TABLE 'tablename'
        # 4. CREATE TABLE schema.tablename
        # 5. CREATE TABLE "schema"."tablename"
        # 6. CREATE TABLE 'schema'.'tablename'

        # Match the table name, quoted or unquoted
        table_pattern = r'CREATE\s+TABLE\s+(?:(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\.]+))\.)?(?:"([^"]+)"|\'([^\']+)\'|([^"\'\s\(]+))'
        table_match = re.search(table_pattern, sql, re.IGNORECASE)
        if not table_match:
            logger.error(f"Could not match CREATE TABLE statement: {sql[:100]}...")
            return None

        # Get the schema and table name
        schema = table_match.group(1) or table_match.group(2) or table_match.group(3)
        table_name = table_match.group(4) or table_match.group(5) or table_match.group(6)
        if not table_name:
            logger.error("Could not parse the table name")
            return None
        logger.debug(f"Parsed table name: {table_name}")

        # Extract the body of the CREATE TABLE statement (the content inside the parentheses)
        body_pattern = r'CREATE\s+TABLE\s+[^(]*\((.*?)\)(?=\s*;|\s*$)'
        body_match = re.search(body_pattern, sql, re.DOTALL | re.IGNORECASE)
        if not body_match:
            logger.error("Could not extract the table body")
            return None
        body_text = body_match.group(1).strip()
        logger.debug(f"Table definition body: {body_text}")

        # Parse the field definitions
        fields = []

        # Split the field definitions, handling nested parentheses and quotes
        field_defs = []
        pos = 0
        in_parentheses = 0
        in_quotes = False
        quote_char = None
        for i, char in enumerate(body_text):
            if char in ["'", '"', '`'] and (not in_quotes or char == quote_char):
                in_quotes = not in_quotes
                if in_quotes:
                    quote_char = char
                else:
                    quote_char = None
            elif char == '(' and not in_quotes:
                in_parentheses += 1
            elif char == ')' and not in_quotes:
                in_parentheses -= 1
            elif char == ',' and in_parentheses == 0 and not in_quotes:
                field_defs.append(body_text[pos:i].strip())
                pos = i + 1
        # Add the last field definition
        if pos < len(body_text):
            field_defs.append(body_text[pos:].strip())
        logger.debug(f"Parsed {len(field_defs)} field definitions")

        # Process each field definition
        for field_def in field_defs:
            # Skip constraint definitions
            if re.match(r'^\s*(?:PRIMARY|UNIQUE|FOREIGN|CHECK|CONSTRAINT)\s+', field_def, re.IGNORECASE):
                continue
            # Extract the field name and type
            field_pattern = r'^\s*(?:"([^"]+)"|\'([^\']+)\'|`([^`]+)`|([a-zA-Z0-9_]+))\s+(.+?)(?:\s+DEFAULT\s+|\s+NOT\s+NULL|\s+REFERENCES|\s*$)'
            field_match = re.search(field_pattern, field_def, re.IGNORECASE)
            if field_match:
                # Field name
                field_name = field_match.group(1) or field_match.group(2) or field_match.group(3) or field_match.group(4)
                # Field type
                field_type = field_match.group(5).strip()
                # Keep only the base type (drop length/precision and trailing keywords)
                type_base = re.split(r'\s+', field_type)[0]
                clean_type_value = clean_type(type_base)
                fields.append((field_name, clean_type_value))
                logger.debug(f"Parsed field: {field_name}, type: {clean_type_value}")
            else:
                logger.warning(f"Could not parse field definition: {field_def}")

        # Extract the table comment
        table_comment = ""
        table_comment_pattern = r"COMMENT\s+ON\s+TABLE\s+(?:['\"]?(\w+)['\"]?)\s+IS\s+'([^']+)'"
        table_comment_match = re.search(table_comment_pattern, sql, re.IGNORECASE)
        if table_comment_match:
            comment_table = table_comment_match.group(1)
            if comment_table.strip("'\"") == table_name.strip("'\""):
                table_comment = table_comment_match.group(2)
                logger.debug(f"Found table comment: {table_comment}")

        # Extract the column comments
        comments = {}
        column_comment_pattern = r"COMMENT\s+ON\s+COLUMN\s+['\"]?(\w+)['\"]?\.['\"]?(\w+)['\"]?\s+IS\s+'([^']+)'"
        for match in re.finditer(column_comment_pattern, sql, re.IGNORECASE):
            comment_table = match.group(1)
            column_name = match.group(2)
            comment = match.group(3)
            # Only keep comments whose table name matches
            if comment_table.strip("'\"") == table_name.strip("'\""):
                comments[column_name] = comment
                logger.debug(f"Found column comment: {column_name} - {comment}")
            else:
                logger.debug(f"Ignoring column comment, table name mismatch: {comment_table} vs {table_name}")

        # Log how fields and comments match up
        logger.debug("======== field/comment matching ========")
        field_names = [f[0] for f in fields]
        logger.debug(f"Field list ({len(field_names)}): {field_names}")
        logger.debug(f"Commented fields ({len(comments)}): {list(comments.keys())}")

        # Build the metadata list
        meta_list = []
        for field_name, field_type in fields:
            chinese_name = comments.get(field_name, "")
            meta_list.append({
                "en_name": field_name,
                "data_type": field_type,
                "name": chinese_name if chinese_name else field_name
            })

        # Check whether the table already exists in the graph
        try:
            status = status_query([table_name])
        except Exception as e:
            logger.error(f"Failed to check table existence: {str(e)}")
            status = [False]

        # Build the return value
        result = {
            table_name: {
                "exist": status[0] if status else False,
                "meta": meta_list
            }
        }
        logger.debug(f"Parse result: {json.dumps(result, ensure_ascii=False)}")
        return result
    except Exception as e:
        logger.error(f"Failed to parse table definition SQL: {str(e)}")
        logger.error(f"Exception detail: {e}")
        import traceback
        logger.error(traceback.format_exc())
        return None
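
# Shape of the parsed result (comment-only sketch added for documentation; the "exist" flag
# requires a reachable Neo4j instance because it comes from status_query):
#   table_sql("CREATE TABLE users (id integer, name varchar(100)); "
#             "COMMENT ON COLUMN users.name IS 'User name';")
#   -> {"users": {"exist": <flag from status_query>, "meta": [
#          {"en_name": "id",   "data_type": "INTEGER", "name": "id"},
#          {"en_name": "name", "data_type": "VARCHAR", "name": "User name"}]}}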

# Check whether the given English table names already exist in the graph
def status_query(key_list):
    query = """
    unwind $Key_list as name
    OPTIONAL MATCH (n:data_model {en_name: name})
    OPTIONAL MATCH (n:data_resource {en_name: name})
    OPTIONAL MATCH (n:data_metric {en_name: name})
    WITH name, CASE
        WHEN n IS NOT NULL THEN True
        ELSE False
    END AS exist
    return collect(exist) AS exist
    """
    with neo4j_driver.get_session() as session:
        result = session.run(query, Key_list=key_list)
        data = result.value()  # single collected value
        return data
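
# Illustrative call (comment-only; assumes the graph is reachable through neo4j_driver):
#   status_query(["ods_users", "dim_date"])
# runs one UNWIND query and returns the collected existence flags for the given en_name values,
# wrapped in whatever the driver's result.value() yields.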

def select_sql(sql_query):
    """Parse a SELECT query."""
    try:
        # Extract the SELECT clause
        select_pattern = r'SELECT\s+(.*?)\s+FROM'
        select_match = re.search(select_pattern, sql_query, re.IGNORECASE | re.DOTALL)
        if not select_match:
            return None
        select_clause = select_match.group(1)

        # Split the field list, ignoring commas inside function calls
        fields = []
        in_parenthesis = 0
        current_field = ""
        for char in select_clause:
            if char == '(':
                in_parenthesis += 1
                current_field += char
            elif char == ')':
                in_parenthesis -= 1
                current_field += char
            elif char == ',' and in_parenthesis == 0:
                fields.append(current_field.strip())
                current_field = ""
            else:
                current_field += char
        if current_field.strip():
            fields.append(current_field.strip())

        # Parse each field
        parsed_fields = []
        for field in fields:
            # Check for a field alias
            alias_pattern = r'(.*?)\s+[aA][sS]\s+(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))$'
            alias_match = re.search(alias_pattern, field)
            if alias_match:
                field_expr = alias_match.group(1).strip()
                field_alias = next((g for g in alias_match.groups()[1:] if g is not None), "")
                parsed_fields.append({
                    "expression": field_expr,
                    "alias": field_alias
                })
            else:
                # No alias
                parsed_fields.append({
                    "expression": field.strip(),
                    "alias": None
                })

        # Extract the FROM clause and table names
        from_pattern = r'FROM\s+(.*?)(?:\s+WHERE|\s+GROUP|\s+HAVING|\s+ORDER|\s+LIMIT|$)'
        from_match = re.search(from_pattern, sql_query, re.IGNORECASE | re.DOTALL)
        tables = []
        if from_match:
            from_clause = from_match.group(1).strip()
            # Analyse the tables in the FROM clause (table name plus optional alias)
            table_pattern = r'(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))(?:\s+(?:AS\s+)?(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))?)?'
            for match in re.finditer(table_pattern, from_clause):
                table_name = match.group(1) or match.group(2) or match.group(3) or match.group(4)
                if table_name:
                    tables.append(table_name)

        return tables
    except Exception as e:
        logger.error(f"Failed to parse SELECT query: {str(e)}")
        # logger.error(traceback.format_exc())
        return None
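
# Comment-only example of the return value (added for documentation):
#   select_sql("SELECT id, name AS full_name FROM users WHERE id > 10")  -> ["users"]
# The per-field expressions and aliases are parsed into parsed_fields, but only the table names
# from the FROM clause are returned.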

def model_resource_list(page, page_size, name_filter=None):
    """Get the paginated model resource list."""
    try:
        with neo4j_driver.get_session() as session:
            # Build the query conditions
            match_clause = "MATCH (n:model_resource)"
            where_clause = ""
            if name_filter:
                where_clause = f" WHERE n.name CONTAINS '{name_filter}'"

            # Count total
            count_cypher = f"{match_clause}{where_clause} RETURN count(n) as count"
            count_result = session.run(count_cypher)
            total_count = count_result.single()["count"]

            # Paginated query
            skip = (page - 1) * page_size
            cypher = f"""
            {match_clause}{where_clause}
            RETURN n
            ORDER BY n.createTime DESC
            SKIP {skip} LIMIT {page_size}
            """
            result = session.run(cypher)

            # Format the results
            resources = []
            for record in result:
                node = dict(record["n"])
                node["id"] = record["n"].id
                resources.append(node)

            return resources, total_count
    except Exception as e:
        logger.error(f"Failed to get model resource list: {str(e)}")
        return [], 0

def data_resource_edit(data):
    """Edit a data resource."""
    try:
        resource_id = data.get("id")
        if not resource_id:
            raise ValueError("Missing resource ID")

        with neo4j_driver.get_session() as session:
            # Collect the node properties to update
            update_fields = {}
            for key, value in data.items():
                if key != "id" and key != "tag":
                    update_fields[key] = value

            # Add the update time
            update_fields["updateTime"] = get_formatted_time()

            # Build the update statement
            set_clause = ", ".join([f"n.{k} = ${k}" for k in update_fields.keys()])
            cypher = f"""
            MATCH (n:data_resource)
            WHERE id(n) = $resource_id
            SET {set_clause}
            RETURN n
            """
            result = session.run(cypher, resource_id=int(resource_id), **update_fields)
            updated_node = result.single()
            if not updated_node:
                raise ValueError("Resource does not exist")

            # Handle the tag relationship
            tag_id = data.get("tag")
            if tag_id:
                # Delete the old tag relationship
                delete_rel_cypher = """
                MATCH (n:data_resource)-[r:label]->()
                WHERE id(n) = $resource_id
                DELETE r
                """
                session.run(delete_rel_cypher, resource_id=int(resource_id))

                # Create the new tag relationship
                create_rel_cypher = """
                MATCH (n:data_resource), (t:data_label)
                WHERE id(n) = $resource_id AND id(t) = $tag_id
                CREATE (n)-[r:label]->(t)
                RETURN r
                """
                session.run(create_rel_cypher, resource_id=int(resource_id), tag_id=int(tag_id))

            # Return the updated node
            return dict(updated_node["n"])
    except Exception as e:
        logger.error(f"Failed to edit data resource: {str(e)}")
        raise

def handle_data_source(data_source):
    """Check for an existing data source and create it if needed.

    Args:
        data_source: a dict describing the data source. Two shapes are supported:
        1. Simple lookup of an existing data source, where en_name is enough:
            {
                "en_name": "10-52-31-104_5432_inventory"
            }
            {
                "en_name": "10-52-31-104_5432_inventory",
                "name": "Teacher Database"
            }
        2. Full data source info (used to create a new data source):
            {
                "en_name": "10-52-31-104_5432_inventory",
                "name": "Teacher Database",
                "type": "postgresql",
                "host": "10.52.31.104",
                "port": 5432,
                "database": "inventory_management",
                "username": "app_user",
                "password": "pG$ecur3P@ss789"
            }

    Returns:
        The data source's en_name on success; raises an exception on failure.
    """
    try:
        # Get the data source identifier
        ds_en_name = data_source.get("en_name")
        if not ds_en_name:
            raise ValueError("Incomplete data source info: en_name is missing")

        # Decide whether this is a simple lookup or a full definition:
        # check for the fields required to create a new data source
        required_fields = ["type", "host", "port", "database", "username"]
        has_required_fields = all(data_source.get(field) for field in required_fields)

        with neo4j_driver.get_session() as session:
            if not has_required_fields:
                # Simple lookup mode: find an existing data source by en_name only
                logger.info(f"Simple data source lookup mode, en_name: {ds_en_name}")
                check_name_cypher = """
                MATCH (ds:data_source {en_name: $en_name})
                RETURN ds
                """
                check_result = session.run(check_name_cypher, en_name=ds_en_name)
                existing_record = check_result.single()
                if existing_record:
                    # The data source exists; return its en_name
                    existing_data_source = dict(existing_record["ds"])
                    logger.info(f"Found existing data source by name: {existing_data_source.get('en_name')}")
                    return existing_data_source.get("en_name")
                else:
                    # The data source does not exist
                    raise ValueError(f"No data source named {ds_en_name} was found; create it first or provide the full data source info")
            else:
                # Full definition mode: create the data source or return the existing one.
                # Make sure name is not empty; fall back to en_name
                if "name" not in data_source or not data_source["name"]:
                    data_source["name"] = ds_en_name
                    logger.info(f"Data source name is empty, falling back to en_name: {ds_en_name}")

                # Check whether an equivalent data source already exists
                # (same properties apart from name and password)
                check_cypher = """
                MATCH (ds:data_source)
                WHERE ds.type = $type AND
                      ds.host = $host AND
                      ds.port = $port AND
                      ds.database = $database AND
                      ds.username = $username
                RETURN ds
                """
                check_result = session.run(
                    check_cypher,
                    type=data_source["type"],
                    host=data_source["host"],
                    port=data_source["port"],
                    database=data_source["database"],
                    username=data_source["username"]
                )
                existing_record = check_result.single()
                if existing_record:
                    # The data source exists; return its en_name
                    existing_data_source = dict(existing_record["ds"])
                    logger.info(f"Found existing data source: {existing_data_source.get('en_name')}")
                    return existing_data_source.get("en_name")

                # No matching data source; create a new node
                create_cypher = """
                CREATE (ds:data_source $properties)
                RETURN ds
                """
                # Add the creation time
                data_source["createTime"] = get_formatted_time()
                create_result = session.run(create_cypher, properties=data_source)
                created_record = create_result.single()
                if not created_record:
                    raise RuntimeError("Failed to create data source node")
                new_data_source = dict(created_record["ds"])
                logger.info(f"Created new data source: {new_data_source.get('en_name')}")
                return new_data_source.get("en_name")
    except Exception as e:
        logger.error(f"Failed to handle data source: {str(e)}")
        raise RuntimeError(f"Failed to handle data source: {str(e)}")
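
# Illustrative usage notes (an added sketch, not part of the original API surface):
# - handle_data_source({"en_name": "10-52-31-104_5432_inventory"}) only looks up an existing
#   data_source node and raises if none is found.
# - Passing the full connection payload (type/host/port/database/username/...) creates the node
#   when no matching source exists and returns its en_name either way.
#
# A minimal, DB-free smoke test of the pure SQL-parsing helpers in this module; it only runs
# when the file is executed directly and assumes nothing beyond the functions defined above.
if __name__ == "__main__":
    sample_ddl = (
        "CREATE TABLE users (id integer, name varchar(100)); "
        "COMMENT ON COLUMN users.name IS 'User name';"
    )
    print(select_create_ddl(sample_ddl))  # one DDL block bundling CREATE TABLE with its comment
    print(clean_type("numeric(10,2)"))    # -> DECIMAL
    print(clean_field_name('"user_id"'))  # -> user_id
    print(select_sql("SELECT id, name AS full_name FROM users WHERE id > 10"))  # -> ['users']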