
Fix the Neo4j warnings caused by the old COUNT{} syntax, part 2

wangxq, 3 months ago
parent
commit ab8a1a3480
2 files changed, 41 insertions and 41 deletions
  1. app/core/data_interface/interface.py  +37 -37
  2. app/core/data_resource/resource.py  +4 -4
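
For context, Neo4j 5 deprecates both the integer-based id() function (replaced by elementId(), which returns a string) and pattern expressions inside count()/size() (replaced by the COUNT {} subquery); both emit warnings before removal. A minimal sketch of the old and new spellings follows; the connection details and usage below are illustrative assumptions, not taken from this repository:

    from neo4j import GraphDatabase

    # Illustrative connection details (assumed, not from this repository).
    driver = GraphDatabase.driver("bolt://localhost:7687", auth=("neo4j", "password"))

    # Deprecated spellings: integer ids and pattern expressions inside COUNT().
    OLD_CQL = """
    MATCH (n:data_standard)
    RETURN id(n) AS nodeid,
           COUNT((()-[]->(n))) + COUNT(((n)-[]->())) AS relationship_count
    """

    # Neo4j 5 replacements: string element ids and COUNT {} subqueries.
    NEW_CQL = """
    MATCH (n:data_standard)
    RETURN elementId(n) AS nodeid,
           COUNT { (n)<-[]-() } + COUNT { (n)-[]->() } AS relationship_count
    """

    with driver.session() as session:
        for record in session.run(NEW_CQL):
            print(record["nodeid"], record["relationship_count"])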

+ 37 - 37
app/core/data_interface/interface.py

@@ -57,7 +57,7 @@ def standard_list(skip_count, page_size, en_name_filter=None,
     cql = f"""
     MATCH (n:data_standard)
     WHERE {where_str}
-    RETURN properties(n) as properties,n.time as time,id(n) as nodeid,
+    RETURN properties(n) as properties,n.time as time,elementId(n) as nodeid,
            COUNT((()-[]->(n))) + COUNT(((n)-[]->()))  as relationship_count
     ORDER BY time desc
     SKIP $skip_count
@@ -108,17 +108,17 @@ def standard_kinship_graph(nodeid):
     # Query statement
     cql = """
     MATCH(da:data_standard)
-    WHERE id(da)=$nodeId
+    WHERE elementId(da)=$nodeId
     OPTIONAL MATCH(a:data_resource)-[:clean_resource]-(da)
     OPTIONAL MATCH(b:data_model)-[:clean_model]-(da)
     WITH 
-        collect({id:toString(id(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
-        collect({id:toString(id(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
-        collect({id:toString(id(da)),text:da.name,type:split(labels(da)[0],'_')[1]}) as nodes,da,
-        collect({from:toString(id(a)),to:toString(id(da)),text:'标准'})+
-        collect({from:toString(id(b)),to:toString(id(da)),text:'标准'})as lines
+        collect({id:toString(elementId(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
+        collect({id:toString(elementId(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
+        collect({id:toString(elementId(da)),text:da.name,type:split(labels(da)[0],'_')[1]}) as nodes,da,
+        collect({from:toString(elementId(a)),to:toString(elementId(da)),text:'标准'})+
+        collect({from:toString(elementId(b)),to:toString(elementId(da)),text:'标准'})as lines
     WITH  
-        toString(id(da)) as rootId,
+        toString(elementId(da)) as rootId,
         apoc.coll.toSet(lines) as lines,
         apoc.coll.toSet(nodes) as nodes
     RETURN nodes,lines,rootId
@@ -155,17 +155,17 @@ def standard_impact_graph(nodeid):
     # Query statement
     cql = """
         MATCH(da:data_standard)
-        WHERE id(da)=$nodeId
+        WHERE elementId(da)=$nodeId
         OPTIONAL MATCH(da)-[:clean_model]-(m1:meta_node)
         OPTIONAL MATCH(da)-[:clean_model]-(m2:meta_node)
         WITH 
-            collect({id:toString(id(da)),text:da.name,type:split(labels(da)[0],'_')[1]})+
-            collect({id:toString(id(m1)),text:m1.name})+
-            collect({id:toString(id(m2)),text:m2.name})as nodes,da,
-            collect({from:toString(id(da)),to:toString(id(m1)),text:'标准清洗'})+
-            collect({from:toString(id(da)),to:toString(id(m2)),text:'标准清洗'})as lines
+            collect({id:toString(elementId(da)),text:da.name,type:split(labels(da)[0],'_')[1]})+
+            collect({id:toString(elementId(m1)),text:m1.name})+
+            collect({id:toString(elementId(m2)),text:m2.name})as nodes,da,
+            collect({from:toString(elementId(da)),to:toString(elementId(m1)),text:'标准清洗'})+
+            collect({from:toString(elementId(da)),to:toString(elementId(m2)),text:'标准清洗'})as lines
         WITH  
-            toString(id(da)) as rootId,
+            toString(elementId(da)) as rootId,
             apoc.coll.toSet(lines) as lines,
             apoc.coll.toSet(nodes) as nodes
         RETURN nodes,lines,rootId
@@ -333,15 +333,15 @@ def id_label_graph(id):
     """
     query = """
     MATCH (n:data_label)
-    WHERE id(n) = $nodeId
+    WHERE elementId(n) = $nodeId
     OPTIONAL MATCH (a)-[:label]-(n)
     WITH 
-       collect({from: toString(id(a)), to: toString(id(n)), text: "标签"}) AS line1,
-       collect({id: toString(id(n)), text: n.name, type:"label"}) AS node1,
-       collect({id: toString(id(a)), text: a.name, type: split(labels(a)[0], '_')[1]}) AS node2, n
+       collect({from: toString(elementId(a)), to: toString(elementId(n)), text: "标签"}) AS line1,
+       collect({id: toString(elementId(n)), text: n.name, type:"label"}) AS node1,
+       collect({id: toString(elementId(a)), text: a.name, type: split(labels(a)[0], '_')[1]}) AS node2, n
     WITH apoc.coll.toSet(line1) AS lines,
                  apoc.coll.toSet(node1 + node2) AS nodes,
-                 toString(id(n)) AS res
+                 toString(elementId(n)) AS res
     RETURN lines, nodes, res
     """
     # Fix: execute the query using the correct session pattern
@@ -375,26 +375,26 @@ def label_kinship_graph(nodeid):
     # Query statement
     cql = """
     MATCH(la:data_label)
-    WHERE id(la)=$nodeId
+    WHERE elementId(la)=$nodeId
     OPTIONAL MATCH(a:data_resource)-[:label]-(la)
     OPTIONAL MATCH(b:data_model)-[:label]-(la)
     OPTIONAL MATCH(meta:meta_node)-[:label]-(la)
     OPTIONAL MATCH(d:data_standard)-[:label]-(la)
     OPTIONAL MATCH(e:data_metric)-[:label]-(la)
     WITH 
-        collect({id:toString(id(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
-        collect({id:toString(id(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
-        collect({id:toString(id(d)),text:d.name,type:split(labels(d)[0],'_')[1]})+
-        collect({id:toString(id(e)),text:e.name,type:split(labels(e)[0],'_')[1]})+
-        collect({id:toString(id(la)),text:la.name,type:split(labels(la)[0],'_')[1]})+
-        collect({id:toString(id(meta)),text:meta.name}) as nodes,la,
-        collect({from:toString(id(a)),to:toString(id(la)),text:'标签'})+
-        collect({from:toString(id(b)),to:toString(id(la)),text:'标签'})+
-        collect({from:toString(id(meta)),to:toString(id(la)),text:'标签'})+
-        collect({from:toString(id(d)),to:toString(id(la)),text:'标签'})+
-        collect({from:toString(id(e)),to:toString(id(la)),text:'标签'})as lines
+        collect({id:toString(elementId(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
+        collect({id:toString(elementId(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
+        collect({id:toString(elementId(d)),text:d.name,type:split(labels(d)[0],'_')[1]})+
+        collect({id:toString(elementId(e)),text:e.name,type:split(labels(e)[0],'_')[1]})+
+        collect({id:toString(elementId(la)),text:la.name,type:split(labels(la)[0],'_')[1]})+
+        collect({id:toString(elementId(meta)),text:meta.name}) as nodes,la,
+        collect({from:toString(elementId(a)),to:toString(elementId(la)),text:'标签'})+
+        collect({from:toString(elementId(b)),to:toString(elementId(la)),text:'标签'})+
+        collect({from:toString(elementId(meta)),to:toString(elementId(la)),text:'标签'})+
+        collect({from:toString(elementId(d)),to:toString(elementId(la)),text:'标签'})+
+        collect({from:toString(elementId(e)),to:toString(elementId(la)),text:'标签'})as lines
     WITH  
-        toString(id(la)) as rootId,
+        toString(elementId(la)) as rootId,
         apoc.coll.toSet(lines) as lines,
         apoc.coll.toSet(nodes) as nodes
     RETURN nodes,lines,rootId
@@ -430,9 +430,9 @@ def label_impact_graph(nodeid):
     # Query statement
     cql = """
         MATCH(n:data_label)
-        WHERE id(n)=$nodeId
-        RETURN {id:toString(id(n)),text:(n.name),type:"label"} AS nodes,
-               toString(id(n)) as rootId
+        WHERE elementId(n)=$nodeId
+        RETURN {id:toString(elementId(n)),text:(n.name),type:"label"} AS nodes,
+               toString(elementId(n)) as rootId
         """
     # Fix: execute the query using the correct session pattern
     driver = connect_graph()
@@ -467,7 +467,7 @@ def dynamic_label_list(name_filter=None):
     MATCH (n:data_label)
     WITH n, apoc.text.levenshteinSimilarity(n.group, "{name_filter}") AS similarity
     WHERE similarity > 0.1 // set the similarity threshold
-    RETURN DISTINCT n.group as name,id(n) as nodeid
+    RETURN DISTINCT n.group as name, elementId(n) as nodeid
     """
 
     # 修复:使用正确的session方式执行查询

+ 4 - 4
app/core/data_resource/resource.py

@@ -193,7 +193,7 @@ def handle_id_resource(resource_id):
             # Query the associated labels
             tag_cypher = """
             MATCH (n:data_resource)-[:label]->(t:data_label)
-            WHERE id(n) = $resource_id
+            WHERE elementId(n) = $resource_id
             RETURN t
             """
             tag_result = session.run(tag_cypher, resource_id=int(resource_id))
@@ -207,7 +207,7 @@ def handle_id_resource(resource_id):
             # Query the associated metadata
             meta_cypher = """
             MATCH (n:data_resource)-[:contain]->(m:Metadata)
-            WHERE id(n) = $resource_id
+            WHERE elementId(n) = $resource_id
             RETURN m
             """
             meta_result = session.run(meta_cypher, resource_id=int(resource_id))
@@ -232,7 +232,7 @@ def id_resource_graph(resource_id):
             # Query the data resource node and its relationships
             cypher = """
             MATCH (n:data_resource)-[r]-(m)
-            WHERE id(n) = $resource_id
+            WHERE elementId(n) = $resource_id
             RETURN n, r, m
             """
             result = session.run(cypher, resource_id=int(resource_id))
@@ -322,7 +322,7 @@ def resource_list(page, page_size, en_name_filter=None, name_filter=None,
                 # Query the associated labels
                 tag_cypher = """
                 MATCH (n:data_resource)-[:label]->(t:data_label)
-                WHERE id(n) = $resource_id
+                WHERE elementId(n) = $resource_id
                 RETURN t
                 """
                 tag_result = session.run(tag_cypher, resource_id=node["id"])
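
One detail worth keeping in mind when parameterizing these queries: elementId() returns a string identifier rather than an integer, so values compared against it should be passed as strings. A minimal, hypothetical sketch of such a lookup; the function and parameter names are illustrative, not from this repository:

    def fetch_resource_labels(session, resource_element_id: str):
        # elementId(n) yields a string such as "4:<db-uuid>:42", so the parameter
        # is passed through as-is rather than being cast to int.
        cypher = """
        MATCH (n:data_resource)-[:label]->(t:data_label)
        WHERE elementId(n) = $resource_id
        RETURN t
        """
        return [record["t"] for record in session.run(cypher, resource_id=resource_element_id)]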