2 次代码提交 3fe3c045ef ... 600c85fc5d

作者 SHA1 备注 提交日期
  maxiaolong 600c85fc5d 修复bug。支持tag多标签。统一graphall接口。 1 周之前
  maxiaolong 73a3630fa0 Enhance data_interface with category filter support and improve standard_list and graph_all functions. Refactor API routes for better readability and error handling. Update meta_data routes for improved JSON response handling and validation. Optimize metric_list query conditions for clarity. 1 周之前

文件差异内容过多而无法显示
+ 0 - 3
.cursor/pending_tasks.json


+ 172 - 426
app/api/business_domain/routes.py

@@ -2,6 +2,7 @@
 Business Domain API 路由模块
 提供业务领域相关的 RESTful API 接口
 """
+
 import io
 import json
 import time
@@ -11,6 +12,7 @@ import urllib.parse
 from flask import request, jsonify, current_app, send_file
 from minio import Minio
 from minio.error import S3Error
+
 from app.api.business_domain import bp
 from app.models.result import success, failed
 from app.services.neo4j_driver import neo4j_driver
@@ -24,379 +26,253 @@ from app.core.business_domain import (
     business_domain_graph_all,
     business_domain_search_list,
     business_domain_compose,
-    business_domain_label_list
+    business_domain_label_list,
 )
 
 logger = logging.getLogger("app")
 
 
+# ----------------------- MinIO helpers -----------------------
 def get_minio_client():
     """获取 MinIO 客户端实例"""
     return Minio(
-        current_app.config['MINIO_HOST'],
-        access_key=current_app.config['MINIO_USER'],
-        secret_key=current_app.config['MINIO_PASSWORD'],
-        secure=current_app.config['MINIO_SECURE']
+        current_app.config["MINIO_HOST"],
+        access_key=current_app.config["MINIO_USER"],
+        secret_key=current_app.config["MINIO_PASSWORD"],
+        secure=current_app.config["MINIO_SECURE"],
     )
 
 
 def get_minio_config():
     """获取 MinIO 配置"""
     return {
-        'MINIO_BUCKET': current_app.config['MINIO_BUCKET'],
-        'PREFIX': current_app.config.get(
-            'BUSINESS_DOMAIN_PREFIX', 'business_domain'
+        "MINIO_BUCKET": current_app.config["MINIO_BUCKET"],
+        "PREFIX": current_app.config.get(
+            "BUSINESS_DOMAIN_PREFIX", "business_domain"
         ),
-        'ALLOWED_EXTENSIONS': current_app.config['ALLOWED_EXTENSIONS']
+        "ALLOWED_EXTENSIONS": current_app.config["ALLOWED_EXTENSIONS"],
     }
 
 
 def allowed_file(filename):
     """检查文件扩展名是否允许"""
-    if '.' not in filename:
+    if "." not in filename:
         return False
-    ext = filename.rsplit('.', 1)[1].lower()
-    return ext in get_minio_config()['ALLOWED_EXTENSIONS']
+    ext = filename.rsplit(".", 1)[1].lower()
+    return ext in get_minio_config()["ALLOWED_EXTENSIONS"]
 
 
-@bp.route('/list', methods=['POST'])
+# ----------------------- Business Domain APIs -----------------------
+@bp.route("/list", methods=["POST"])
 def bd_list():
-    """
-    获取业务领域列表
-    
-    请求参数 (JSON):
-        - current: 当前页码,默认1
-        - size: 每页大小,默认10
-        - name_en: 英文名称过滤条件(可选)
-        - name_zh: 中文名称过滤条件(可选)
-        - type: 类型过滤条件,默认'all'表示不过滤(可选)
-        - category: 分类过滤条件(可选)
-        - tag: 标签过滤条件(可选)
-        
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 
-            - records: 业务领域列表
-            - total: 总数量
-            - size: 每页大小
-            - current: 当前页码
-    """
+    """获取业务领域列表"""
     try:
-        # 获取分页和筛选参数
         if not request.json:
-            return jsonify(failed('请求数据不能为空'))
-        
-        page = int(request.json.get('current', 1))
-        page_size = int(request.json.get('size', 10))
-        name_en_filter = request.json.get('name_en')
-        name_zh_filter = request.json.get('name_zh')
-        type_filter = request.json.get('type', 'all')
-        category_filter = request.json.get('category')
-        tag_filter = request.json.get('tag')
-        
-        # 调用业务逻辑查询业务领域列表
+            return jsonify(failed("请求数据不能为空"))
+
+        page = int(request.json.get("current", 1))
+        page_size = int(request.json.get("size", 10))
+        name_en_filter = request.json.get("name_en")
+        name_zh_filter = request.json.get("name_zh")
+        type_filter = request.json.get("type", "all")
+        category_filter = request.json.get("category")
+        tag_filter = request.json.get("tag")
+
         domains, total_count = business_domain_list(
-            page, 
-            page_size, 
-            name_en_filter, 
-            name_zh_filter, 
-            type_filter, 
-            category_filter, 
-            tag_filter
+            page,
+            page_size,
+            name_en_filter,
+            name_zh_filter,
+            type_filter,
+            category_filter,
+            tag_filter,
+        )
+
+        return jsonify(
+            success(
+                {
+                    "records": domains,
+                    "total": total_count,
+                    "size": page_size,
+                    "current": page,
+                }
+            )
         )
-        
-        # 返回结果
-        return jsonify(success({
-            "records": domains,
-            "total": total_count,
-            "size": page_size,
-            "current": page
-        }))
     except Exception as e:
         logger.error(f"获取业务领域列表失败: {str(e)}")
         return jsonify(failed("获取业务领域列表失败", error=str(e)))
 
 
-@bp.route('/detail', methods=['POST'])
+@bp.route("/detail", methods=["POST"])
 def bd_detail():
-    """
-    获取业务领域详情
-    
-    请求参数 (JSON):
-        - id: 业务领域节点ID(必填)
-        
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 业务领域详情
-    """
+    """获取业务领域详情"""
     try:
-        # 获取参数
         if not request.json:
-            return jsonify(failed('请求数据不能为空'))
-        
-        domain_id = request.json.get('id')
-        
+            return jsonify(failed("请求数据不能为空"))
+
+        domain_id = request.json.get("id")
         if domain_id is None:
             return jsonify(failed("业务领域ID不能为空"))
-        
-        # 确保传入的ID为整数
+
         try:
             domain_id = int(domain_id)
         except (ValueError, TypeError):
             return jsonify(failed(f"业务领域ID必须为整数, 收到的是: {domain_id}"))
-        
-        # 调用业务逻辑查询业务领域详情
+
         domain_data = get_business_domain_by_id(domain_id)
-        
         if not domain_data:
             return jsonify(failed("业务领域不存在"))
-        
+
         return jsonify(success(domain_data))
     except Exception as e:
         logger.error(f"获取业务领域详情失败: {str(e)}")
         return jsonify(failed("获取业务领域详情失败", error=str(e)))
 
 
-@bp.route('/delete', methods=['POST'])
+@bp.route("/delete", methods=["POST"])
 def bd_delete():
-    """
-    删除业务领域
-    
-    请求参数 (JSON):
-        - id: 业务领域节点ID(必填)
-        
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 删除结果
-    """
+    """删除业务领域"""
     try:
-        # 获取参数
         if not request.json:
             return jsonify(failed("请求数据不能为空"))
-        
-        domain_id = request.json.get('id')
+
+        domain_id = request.json.get("id")
         if domain_id is None:
             return jsonify(failed("业务领域ID不能为空"))
-        
-        # 调用业务逻辑删除业务领域
+
         result = delete_business_domain(domain_id)
-        
         if result:
             return jsonify(success({"message": "业务领域删除成功"}))
-        else:
-            return jsonify(failed("业务领域删除失败"))
+        return jsonify(failed("业务领域删除失败"))
     except Exception as e:
         logger.error(f"删除业务领域失败: {str(e)}")
         return jsonify(failed("删除业务领域失败", error=str(e)))
 
 
-@bp.route('/save', methods=['POST'])
+@bp.route("/save", methods=["POST"])
 def bd_save():
-    """
-    保存业务领域(新建或更新)
-
-    请求参数 (JSON):
-        - id: 业务领域节点ID(可选,有则更新,无则新建)
-        - name_zh: 中文名称(新建时必填)
-        - name_en: 英文名称(新建时必填)
-        - describe: 描述(可选)
-        - type: 类型(可选)
-        - category: 分类(可选)
-        - tag: 标签ID(可选)
-        - data_source: 数据源ID(可选)
-        - 其他属性字段...
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 保存后的业务领域数据
-    """
+    """保存业务领域(新建或更新)"""
     try:
-        # 获取保存数据
         data = request.json
-
         if not data:
             return jsonify(failed("请求数据不能为空"))
 
-        # 新建时校验必填字段
         if not data.get("id"):
             if not data.get("name_zh") or not data.get("name_en"):
                 return jsonify(failed("新建时 name_zh 和 name_en 为必填项"))
 
-        # 调用业务逻辑保存业务领域
         saved_data = save_business_domain(data)
-
         return jsonify(success(saved_data))
     except Exception as e:
         logger.error(f"保存业务领域失败: {str(e)}")
         return jsonify(failed("保存业务领域失败", error=str(e)))
 
 
-@bp.route('/update', methods=['POST'])
+@bp.route("/update", methods=["POST"])
 def bd_update():
-    """
-    更新业务领域
-
-    请求参数 (JSON):
-        - id: 业务领域节点ID(必填)
-        - name_zh: 中文名称(可选)
-        - name_en: 英文名称(可选)
-        - describe: 描述(可选)
-        - tag: 标签ID(可选)
-        - data_source: 数据源ID(可选)
-        - 其他属性字段...
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 更新后的业务领域数据
-    """
+    """更新业务领域"""
     try:
-        # 获取更新数据
         data = request.json
-
         if not data or "id" not in data:
             return jsonify(failed("参数不完整"))
 
-        # 调用业务逻辑更新业务领域
         updated_data = update_business_domain(data)
-
         return jsonify(success(updated_data))
     except Exception as e:
         logger.error(f"更新业务领域失败: {str(e)}")
         return jsonify(failed("更新业务领域失败", error=str(e)))
 
 
-@bp.route('/upload', methods=['POST'])
+@bp.route("/upload", methods=["POST"])
 def bd_upload():
-    """
-    上传业务领域相关文件
-
-    请求参数 (multipart/form-data):
-        - file: 上传的文件(必填)
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data:
-            - filename: 原始文件名
-            - size: 文件大小(字节)
-            - type: 文件类型
-            - url: 文件存储路径
-    """
+    """上传业务领域相关文件"""
+    response = None
     try:
-        # 检查请求中是否有文件
-        if 'file' not in request.files:
+        if "file" not in request.files:
             return jsonify(failed("没有找到上传的文件"))
 
-        file = request.files['file']
-
-        # 检查文件名
-        if file.filename == '':
+        file = request.files["file"]
+        if file.filename == "":
             return jsonify(failed("未选择文件"))
-
-        # 检查文件类型
         if not allowed_file(file.filename):
             return jsonify(failed("不支持的文件类型"))
 
-        # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
 
-        # 读取文件内容
         file_content = file.read()
         file_size = len(file_content)
-        filename = file.filename or ''
-        file_type = filename.rsplit('.', 1)[1].lower()
-
-        # 提取文件名(不包含扩展名)
-        filename_without_ext = filename.rsplit('.', 1)[0]
-
-        # 生成紧凑的时间戳 (yyyyMMddHHmmss)
+        filename = file.filename or ""
+        file_type = filename.rsplit(".", 1)[1].lower()
+        filename_without_ext = filename.rsplit(".", 1)[0]
         timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
 
-        # 生成唯一文件名
-        prefix = config['PREFIX']
         object_name = (
-            f"{prefix}/{filename_without_ext}_{timestamp}.{file_type}"
+            f"{config['PREFIX']}/"
+            f"{filename_without_ext}_{timestamp}.{file_type}"
         )
 
-        # 上传文件到 MinIO
         minio_client.put_object(
-            config['MINIO_BUCKET'],
+            config["MINIO_BUCKET"],
             object_name,
             io.BytesIO(file_content),
             file_size,
-            content_type=f"application/{file_type}"
+            content_type=f"application/{file_type}",
         )
 
         logger.info(f"文件上传成功: {object_name}, 大小: {file_size}")
 
-        # 返回结果
-        return jsonify(success({
-            "filename": file.filename,
-            "size": file_size,
-            "type": file_type,
-            "url": object_name
-        }))
+        return jsonify(
+            success(
+                {
+                    "filename": file.filename,
+                    "size": file_size,
+                    "type": file_type,
+                    "url": object_name,
+                }
+            )
+        )
     except Exception as e:
         logger.error(f"文件上传失败: {str(e)}")
         return jsonify(failed("文件上传失败", error=str(e)))
+    finally:
+        if response:
+            response.close()
+            response.release_conn()
 
 
-@bp.route('/download', methods=['GET'])
+@bp.route("/download", methods=["GET"])
 def bd_download():
-    """
-    下载业务领域相关文件
-
-    请求参数 (URL Query):
-        - url: 文件存储路径(必填)
-
-    返回:
-        - 文件流(作为附件下载)
-    """
+    """下载业务领域相关文件"""
     response = None
     try:
-        # 获取文件路径参数
-        object_name = request.args.get('url')
+        object_name = request.args.get("url")
         if not object_name:
             return jsonify(failed("文件路径不能为空"))
 
-        # URL解码,处理特殊字符
         object_name = urllib.parse.unquote(object_name)
-
-        # 记录下载请求信息,便于调试
         logger.info(f"下载文件请求: {object_name}")
 
-        # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
 
-        # 获取文件
         try:
             response = minio_client.get_object(
-                config['MINIO_BUCKET'], object_name
+                config["MINIO_BUCKET"], object_name
             )
             file_data = response.read()
         except S3Error as e:
             logger.error(f"MinIO获取文件失败: {str(e)}")
             return jsonify(failed(f"文件获取失败: {str(e)}"))
 
-        # 获取文件名
-        file_name = object_name.split('/')[-1]
-
-        # 直接从内存返回文件,不创建临时文件
+        file_name = object_name.split("/")[-1]
         file_stream = io.BytesIO(file_data)
 
-        # 返回文件
         return send_file(
             file_stream,
             as_attachment=True,
             download_name=file_name,
-            mimetype="application/octet-stream"
+            mimetype="application/octet-stream",
         )
     except Exception as e:
         logger.error(f"文件下载失败: {str(e)}")
@@ -407,119 +283,68 @@ def bd_download():
             response.release_conn()
 
 
-@bp.route('/graphall', methods=['POST'])
+@bp.route("/graphall", methods=["POST"])
 def bd_graph_all():
-    """
-    获取业务领域完整关系图谱
-
-    请求参数 (JSON):
-        - id: 业务领域节点ID(必填)
-        - meta: 是否包含元数据节点,默认True(可选)
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data:
-            - nodes: 节点列表
-            - lines: 关系列表
-    """
+    """获取业务领域完整关系图谱"""
     try:
-        # 获取参数
         if not request.json:
-            return jsonify(failed('请求数据不能为空'))
-
-        domain_id = request.json.get('id')
-        include_meta = request.json.get('meta', True)
+            return jsonify(failed("请求数据不能为空"))
 
+        domain_id = request.json.get("id")
+        include_meta = request.json.get("meta", True)
         if domain_id is None:
             return jsonify(failed("业务领域ID不能为空"))
 
-        # 确保传入的ID为整数
         try:
             domain_id = int(domain_id)
         except (ValueError, TypeError):
-            return jsonify(failed(
-                f"业务领域ID必须为整数, 收到的是: {domain_id}"
-            ))
+            return jsonify(failed(f"业务领域ID必须为整数, 收到的是: {domain_id}"))
 
-        # 调用业务逻辑获取完整图谱
         graph_data = business_domain_graph_all(domain_id, include_meta)
-
         return jsonify(success(graph_data))
     except Exception as e:
         logger.error(f"获取业务领域图谱失败: {str(e)}")
         return jsonify(failed("获取业务领域图谱失败", error=str(e)))
 
 
-@bp.route('/ddlparse', methods=['POST'])
+@bp.route("/ddlparse", methods=["POST"])
 def bd_ddl_parse():
-    """
-    解析DDL语句,用于业务领域创建
-
-    请求参数:
-        - file: SQL文件(multipart/form-data,可选)
-        - sql: SQL内容(JSON,可选)
-        至少提供其中一种方式
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 解析后的DDL列表,包含表信息和字段信息
-    """
+    """解析DDL语句,用于业务领域创建"""
     try:
-        # 获取参数 - 支持两种方式:上传文件或JSON
-        sql_content = ''
+        sql_content = ""
 
-        # 检查是否有文件上传
-        if 'file' in request.files:
-            file = request.files['file']
-            # 检查文件是否存在且文件名不为空
+        if "file" in request.files:
+            file = request.files["file"]
             if file and file.filename:
-                # 检查是否是SQL文件
-                if not file.filename.lower().endswith('.sql'):
+                if not file.filename.lower().endswith(".sql"):
                     return jsonify(failed("只接受SQL文件"))
-
-                # 读取文件内容
-                sql_content = file.read().decode('utf-8')
-                logger.info(
-                    f"从上传的文件中读取SQL内容,文件名: {file.filename}"
-                )
-        # 如果没有文件上传,检查是否有JSON输入
+                sql_content = file.read().decode("utf-8")
+                logger.info(f"从上传的文件中读取SQL内容,文件名: {file.filename}")
         elif request.is_json and request.json:
-            sql_content = request.json.get('sql', '')
+            sql_content = request.json.get("sql", "")
 
-        # 如果两种方式都没有提供SQL内容,则返回错误
         if not sql_content:
-            return jsonify(failed(
-                "SQL内容不能为空,请上传SQL文件或提供SQL内容"
-            ))
+            return jsonify(failed("SQL内容不能为空,请上传SQL文件或提供SQL内容"))
 
         parser = DDLParser()
-        # 提取创建表的DDL语句
         ddl_list = parser.parse_ddl(sql_content)
-
         if not ddl_list:
             return jsonify(failed("未找到有效的CREATE TABLE语句"))
 
-        # 处理表的存在状态
         if isinstance(ddl_list, list):
-            # 新格式:数组格式
-            # 获取所有表名
             table_names = []
             for table_item in ddl_list:
-                if isinstance(table_item, dict) and 'table_info' in table_item:
-                    table_name = table_item['table_info'].get('name_en')
+                if isinstance(table_item, dict) and "table_info" in table_item:
+                    table_name = table_item["table_info"].get("name_en")
                     if table_name:
                         table_names.append(table_name)
 
-            # 首先为所有表设置默认的exist状态
             for table_item in ddl_list:
                 if isinstance(table_item, dict):
                     table_item["exist"] = False
 
             if table_names:
                 try:
-                    # 查询业务领域是否存在
                     with neo4j_driver.get_session() as session:
                         table_query = """
                         UNWIND $names AS name
@@ -530,30 +355,26 @@ def bd_ddl_parse():
                             table_query, names=table_names
                         )
 
-                        # 创建存在状态映射
                         exist_map = {}
                         for record in table_results:
-                            table_name = record["name"]
+                            t_name = record["name"]
                             exists = record["exists"]
-                            exist_map[table_name] = exists
+                            exist_map[t_name] = exists
 
-                        # 更新存在的表的状态
                         for table_item in ddl_list:
-                            if (isinstance(table_item, dict)
-                                    and 'table_info' in table_item):
-                                info = table_item['table_info']
-                                t_name = info.get('name_en')
+                            if (
+                                isinstance(table_item, dict)
+                                and "table_info" in table_item
+                            ):
+                                info = table_item["table_info"]
+                                t_name = info.get("name_en")
                                 if t_name and t_name in exist_map:
                                     table_item["exist"] = exist_map[t_name]
                 except Exception as e:
                     logger.error(f"检查业务领域存在状态失败: {str(e)}")
-                    # 如果查询失败,所有表保持默认的False状态
 
         elif isinstance(ddl_list, dict):
-            # 兼容旧格式:字典格式(以表名为key)
             table_names = list(ddl_list.keys())
-
-            # 首先为所有表设置默认的exist状态
             for table_name in table_names:
                 if isinstance(ddl_list[table_name], dict):
                     ddl_list[table_name]["exist"] = False
@@ -565,7 +386,6 @@ def bd_ddl_parse():
 
             if table_names:
                 try:
-                    # 查询业务领域是否存在
                     with neo4j_driver.get_session() as session:
                         table_query = """
                         UNWIND $names AS name
@@ -576,24 +396,21 @@ def bd_ddl_parse():
                             table_query, names=table_names
                         )
 
-                        # 更新存在的表的状态
                         for record in table_results:
-                            table_name = record["name"]
+                            t_name = record["name"]
                             exists = record["exists"]
                             is_valid = (
-                                table_name in ddl_list
-                                and isinstance(ddl_list[table_name], dict)
+                                t_name in ddl_list
+                                and isinstance(ddl_list[t_name], dict)
                             )
                             if is_valid:
-                                ddl_list[table_name]["exist"] = exists
+                                ddl_list[t_name]["exist"] = exists
                 except Exception as e:
                     logger.error(f"检查业务领域存在状态失败: {str(e)}")
-                    # 如果查询失败,所有表保持默认的False状态
 
         logger.debug(
             f"识别到的DDL语句: {json.dumps(ddl_list, ensure_ascii=False)}"
         )
-
         return jsonify(success(ddl_list))
     except Exception as e:
         logger.error(f"解析DDL语句失败: {str(e)}")
@@ -601,58 +418,30 @@ def bd_ddl_parse():
         return jsonify(failed("解析DDL语句失败", error=str(e)))
 
 
-@bp.route('/search', methods=['POST'])
+@bp.route("/search", methods=["POST"])
 def bd_search():
-    """
-    搜索业务领域关联的元数据
-
-    请求参数 (JSON):
-        - id: 业务领域节点ID(必填)
-        - current: 当前页码,默认1
-        - size: 每页大小,默认10
-        - name_en: 英文名称过滤条件(可选)
-        - name_zh: 中文名称过滤条件(可选)
-        - category: 分类过滤条件(可选)
-        - tag: 标签过滤条件(可选)
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data:
-            - records: 元数据列表
-            - total: 总数量
-            - size: 每页大小
-            - current: 当前页码
-    """
+    """搜索业务领域关联的元数据"""
     try:
-        # 获取分页和筛选参数
         if not request.json:
-            return jsonify(failed('请求数据不能为空'))
+            return jsonify(failed("请求数据不能为空"))
 
-        page = int(request.json.get('current', 1))
-        page_size = int(request.json.get('size', 10))
-        domain_id = request.json.get('id')
+        page = int(request.json.get("current", 1))
+        page_size = int(request.json.get("size", 10))
+        domain_id = request.json.get("id")
 
-        name_en_filter = request.json.get('name_en')
-        name_zh_filter = request.json.get('name_zh')
-        category_filter = request.json.get('category')
-        tag_filter = request.json.get('tag')
+        name_en_filter = request.json.get("name_en")
+        name_zh_filter = request.json.get("name_zh")
+        category_filter = request.json.get("category")
+        tag_filter = request.json.get("tag")
 
         if domain_id is None:
             return jsonify(failed("业务领域ID不能为空"))
 
-        # 确保传入的ID为整数
         try:
             domain_id = int(domain_id)
         except (ValueError, TypeError):
-            return jsonify(failed(
-                f"业务领域ID必须为整数, 收到的是: {domain_id}"
-            ))
-
-        # 记录请求信息
-        logger.info(f"获取业务领域关联元数据请求,ID: {domain_id}")
+            return jsonify(failed(f"业务领域ID必须为整数, 收到的是: {domain_id}"))
 
-        # 调用业务逻辑查询关联元数据
         metadata_list, total_count = business_domain_search_list(
             domain_id,
             page,
@@ -660,121 +449,78 @@ def bd_search():
             name_en_filter,
             name_zh_filter,
             category_filter,
-            tag_filter
+            tag_filter,
         )
 
-        # 返回结果
-        return jsonify(success({
-            "records": metadata_list,
-            "total": total_count,
-            "size": page_size,
-            "current": page
-        }))
+        return jsonify(
+            success(
+                {
+                    "records": metadata_list,
+                    "total": total_count,
+                    "size": page_size,
+                    "current": page,
+                }
+            )
+        )
     except Exception as e:
         logger.error(f"业务领域关联元数据搜索失败: {str(e)}")
         return jsonify(failed("业务领域关联元数据搜索失败", error=str(e)))
 
 
-@bp.route('/compose', methods=['POST'])
+@bp.route("/compose", methods=["POST"])
 def bd_compose():
-    """
-    从已有业务领域中组合创建新的业务领域
-
-    请求参数 (JSON):
-        - name_zh: 中文名称(必填)
-        - name_en: 英文名称(可选,不提供则自动翻译)
-        - id_list: 关联的业务领域和元数据列表(必填)
-            格式: [{"domain_id": 123, "metaData": [{"id": 456}, ...]}]
-        - describe: 描述(可选)
-        - type: 类型(可选)
-        - category: 分类(可选)
-        - tag: 标签ID(可选)
-        - data_source: 数据源ID(可选)
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data: 创建后的业务领域数据
-    """
+    """从已有业务领域中组合创建新的业务领域"""
     try:
-        # 获取请求数据
         data = request.json
-
         if not data:
             return jsonify(failed("请求数据不能为空"))
 
-        # 校验必填字段
         if not data.get("name_zh"):
             return jsonify(failed("name_zh 为必填项"))
-
         if not data.get("id_list"):
             return jsonify(failed("id_list 为必填项"))
 
-        # 调用业务逻辑组合创建业务领域
         result_data = business_domain_compose(data)
-
-        # 构建响应数据
-        response_data = {
-            "business_domain": result_data
-        }
-
+        response_data = {"business_domain": result_data}
         return jsonify(success(response_data))
     except Exception as e:
         logger.error(f"组合创建业务领域失败: {str(e)}")
         return jsonify(failed("组合创建业务领域失败", error=str(e)))
 
 
-@bp.route('/labellist', methods=['POST'])
+@bp.route("/labellist", methods=["POST"])
 def bd_label_list():
-    """
-    获取数据标签列表(用于业务领域关联)
-
-    请求参数 (JSON):
-        - current: 当前页码,默认1
-        - size: 每页大小,默认10
-        - name_en: 英文名称过滤条件(可选)
-        - name_zh: 中文名称过滤条件(可选)
-        - category: 分类过滤条件(可选)
-        - group: 分组过滤条件(可选)
-
-    返回:
-        - success: 是否成功
-        - message: 消息
-        - data:
-            - records: 标签列表
-            - total: 总数量
-            - size: 每页大小
-            - current: 当前页码
-    """
+    """获取数据标签列表(用于业务领域关联)"""
     try:
-        # 获取分页和筛选参数
         if not request.json:
-            return jsonify(failed('请求数据不能为空'))
+            return jsonify(failed("请求数据不能为空"))
 
-        page = int(request.json.get('current', 1))
-        page_size = int(request.json.get('size', 10))
-        name_en_filter = request.json.get('name_en')
-        name_zh_filter = request.json.get('name_zh')
-        category_filter = request.json.get('category')
-        group_filter = request.json.get('group')
+        page = int(request.json.get("current", 1))
+        page_size = int(request.json.get("size", 10))
+        name_en_filter = request.json.get("name_en")
+        name_zh_filter = request.json.get("name_zh")
+        category_filter = request.json.get("category")
+        group_filter = request.json.get("group")
 
-        # 调用业务逻辑查询标签列表
         labels, total_count = business_domain_label_list(
             page,
             page_size,
             name_en_filter,
             name_zh_filter,
             category_filter,
-            group_filter
+            group_filter,
         )
 
-        # 返回结果
-        return jsonify(success({
-            "records": labels,
-            "total": total_count,
-            "size": page_size,
-            "current": page
-        }))
+        return jsonify(
+            success(
+                {
+                    "records": labels,
+                    "total": total_count,
+                    "size": page_size,
+                    "current": page,
+                }
+            )
+        )
     except Exception as e:
         logger.error(f"获取标签列表失败: {str(e)}")
         return jsonify(failed("获取标签列表失败", error=str(e)))

+ 32 - 14
app/api/data_flow/routes.py

@@ -1,4 +1,4 @@
-from flask import request, jsonify
+from flask import request
 from app.api.data_flow import bp
 from app.core.data_flow.dataflows import DataFlowService
 import logging
@@ -9,6 +9,7 @@ from app.core.graph.graph_operations import MyEncoder
 
 logger = logging.getLogger(__name__)
 
+
 @bp.route('/get-dataflows-list', methods=['GET'])
 def get_dataflows():
     """获取数据流列表"""
@@ -16,8 +17,12 @@ def get_dataflows():
         page = request.args.get('page', 1, type=int)
         page_size = request.args.get('page_size', 10, type=int)
         search = request.args.get('search', '')
-        
-        result = DataFlowService.get_dataflows(page=page, page_size=page_size, search=search)
+
+        result = DataFlowService.get_dataflows(
+            page=page,
+            page_size=page_size,
+            search=search,
+        )
         res = success(result, "success")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except Exception as e:
@@ -25,6 +30,7 @@ def get_dataflows():
         res = failed(f'获取数据流列表失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/get-dataflow/<int:dataflow_id>', methods=['GET'])
 def get_dataflow(dataflow_id):
     """根据ID获取数据流详情"""
@@ -41,6 +47,7 @@ def get_dataflow(dataflow_id):
         res = failed(f'获取数据流详情失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/add-dataflow', methods=['POST'])
 def create_dataflow():
     """创建新的数据流"""
@@ -49,7 +56,7 @@ def create_dataflow():
         if not data:
             res = failed("请求数据不能为空", code=400)
             return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-            
+
         result = DataFlowService.create_dataflow(data)
         res = success(result, "数据流创建成功")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
@@ -62,6 +69,7 @@ def create_dataflow():
         res = failed(f'创建数据流失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/update-dataflow/<int:dataflow_id>', methods=['PUT'])
 def update_dataflow(dataflow_id):
     """更新数据流"""
@@ -70,7 +78,7 @@ def update_dataflow(dataflow_id):
         if not data:
             res = failed("请求数据不能为空", code=400)
             return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-            
+
         result = DataFlowService.update_dataflow(dataflow_id, data)
         if result:
             res = success(result, "数据流更新成功")
@@ -83,6 +91,7 @@ def update_dataflow(dataflow_id):
         res = failed(f'更新数据流失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/delete-dataflow/<int:dataflow_id>', methods=['DELETE'])
 def delete_dataflow(dataflow_id):
     """删除数据流"""
@@ -99,6 +108,7 @@ def delete_dataflow(dataflow_id):
         res = failed(f'删除数据流失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/execute-dataflow/<int:dataflow_id>', methods=['POST'])
 def execute_dataflow(dataflow_id):
     """执行数据流"""
@@ -112,6 +122,7 @@ def execute_dataflow(dataflow_id):
         res = failed(f'执行数据流失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/get-dataflow-status/<int:dataflow_id>', methods=['GET'])
 def get_dataflow_status(dataflow_id):
     """获取数据流执行状态"""
@@ -124,14 +135,19 @@ def get_dataflow_status(dataflow_id):
         res = failed(f'获取数据流状态失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/get-dataflow-logs/<int:dataflow_id>', methods=['GET'])
 def get_dataflow_logs(dataflow_id):
     """获取数据流执行日志"""
     try:
         page = request.args.get('page', 1, type=int)
         page_size = request.args.get('page_size', 50, type=int)
-        
-        result = DataFlowService.get_dataflow_logs(dataflow_id, page=page, page_size=page_size)
+
+        result = DataFlowService.get_dataflow_logs(
+            dataflow_id,
+            page=page,
+            page_size=page_size,
+        )
         res = success(result, "success")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except Exception as e:
@@ -139,6 +155,7 @@ def get_dataflow_logs(dataflow_id):
         res = failed(f'获取数据流日志失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/create-script', methods=['POST'])
 def create_script():
     """使用Deepseek模型生成脚本"""
@@ -147,20 +164,20 @@ def create_script():
         if not json_data:
             res = failed("请求数据不能为空", code=400)
             return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-        
+
         # 记录接收到的数据用于调试
         logger.info(f"create_script接收到的数据: {json_data}")
         logger.info(f"json_data类型: {type(json_data)}")
-        
+
         # 直接使用前端提交的json_data作为request_data参数
         script_content = DataFlowService.create_script(json_data)
-        
+
         result_data = {
             'script_content': script_content,
             'format': 'txt',
             'generated_at': datetime.now().isoformat()
         }
-        
+
         res = success(result_data, "脚本生成成功")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except ValueError as ve:
@@ -172,18 +189,19 @@ def create_script():
         res = failed(f'脚本生成失败: {str(e)}')
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
+
 @bp.route('/get-BD-list', methods=['GET'])
 def get_business_domain_list():
     """获取BusinessDomain节点列表"""
     try:
         logger.info("接收到获取BusinessDomain列表请求")
-        
+
         # 调用服务层函数获取BusinessDomain列表
         bd_list = DataFlowService.get_business_domain_list()
-        
+
         res = success(bd_list, "操作成功")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except Exception as e:
         logger.error(f"获取BusinessDomain列表失败: {str(e)}")
         res = failed(f'获取BusinessDomain列表失败: {str(e)}', 500, {})
-        return json.dumps(res, ensure_ascii=False, cls=MyEncoder) 
+        return json.dumps(res, ensure_ascii=False, cls=MyEncoder)

+ 152 - 87
app/api/data_interface/routes.py

@@ -1,21 +1,23 @@
-from flask import request
+from flask import request, jsonify
 from app.api.data_interface import bp
 from app.models.result import success, failed
-from app.core.graph.graph_operations import connect_graph, MyEncoder, create_or_get_node
+from app.core.graph.graph_operations import (
+    connect_graph,
+    MyEncoder,
+    create_or_get_node,
+)
 from app.core.data_interface import interface
 from app.core.meta_data import translate_and_parse, get_formatted_time
 from app.core.llm import code_generate_standard
 import json
 
 
-# 数据标准新增 data_standard
 @bp.route('/data/standard/add', methods=['POST'])
 def data_standard_add():
     try:
-        # 传入请求参数
         receiver = request.get_json()
-        name_zh = receiver['name_zh']  # 中文名称
-        name_en = translate_and_parse(name_zh)  # 英文名
+        name_zh = receiver['name_zh']
+        name_en = translate_and_parse(name_zh)
         receiver['name_en'] = name_en[0]
         receiver['create_time'] = get_formatted_time()
         receiver['tag'] = json.dumps(receiver['tag'], ensure_ascii=False)
@@ -30,15 +32,13 @@ def data_standard_add():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标准详情 data_standard
 @bp.route('/data/standard/detail', methods=['POST'])
 def data_standard_detail():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         nodeid = receiver['id']  # id
 
-        cql = """MATCH (n:data_standard) where id(n) = $nodeId 
+        cql = """MATCH (n:data_standard) where id(n) = $nodeId
                   RETURN properties(n) as property"""
         # Create a session from the driver returned by connect_graph
         with connect_graph().session() as session:
@@ -58,18 +58,16 @@ def data_standard_detail():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标准代码 data_standard
 @bp.route('/data/standard/code', methods=['POST'])
 def data_standard_code():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         input = receiver['input']
         describe = receiver['describe']
         output = receiver['output']
         relation = {
-            "输入参数": input,
-            "输出参数": output
+            "input_params": input,
+            "output_params": output,
         }
         result = code_generate_standard(describe, relation)
 
@@ -81,14 +79,12 @@ def data_standard_code():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标准更新 data_standard 未加到接口文档
 @bp.route('/data/standard/update', methods=['POST'])
 def data_standard_update():
     try:
-        # 传入请求参数
         receiver = request.get_json()
-        name_zh = receiver['name_zh']  # 中文名称
-        name_en = translate_and_parse(name_zh)  # 英文名
+        name_zh = receiver['name_zh']
+        name_en = translate_and_parse(name_zh)
         receiver['name_en'] = name_en[0]
         receiver['create_time'] = get_formatted_time()
 
@@ -102,11 +98,9 @@ def data_standard_update():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标准列表展示
 @bp.route('/data/standard/list', methods=['POST'])
 def data_standard_list():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         page = int(receiver.get('current', 1))
         page_size = int(receiver.get('size', 10))
@@ -115,13 +109,23 @@ def data_standard_list():
         category = receiver.get('category', None)
         time = receiver.get('time', None)
 
-        # 计算跳过的记录的数量
         skip_count = (page - 1) * page_size
 
-        data, total = interface.standard_list(skip_count, page_size, name_en_filter,
-                                              name_zh_filter, category, time)
-
-        response_data = {'records': data, 'total': total, 'size': page_size, 'current': page}
+        data, total = interface.standard_list(
+            skip_count,
+            page_size,
+            name_en_filter,
+            name_zh_filter,
+            category,
+            time,
+        )
+
+        response_data = {
+            'records': data,
+            'total': total,
+            'size': page_size,
+            'current': page,
+        }
         res = success(response_data, "success")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except Exception as e:
@@ -129,11 +133,9 @@ def data_standard_list():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标准的图谱(血缘关系Kinship+影响关系Impact+所有关系all)
 @bp.route('/data/standard/graph/all', methods=['POST'])
 def data_standard_graph_all():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         nodeid = receiver['id']
         type = receiver['type']  # kinship/impact/all
@@ -143,20 +145,22 @@ def data_standard_graph_all():
             result = interface.standard_impact_graph(nodeid)
         else:
             result = interface.standard_all_graph(nodeid)
-        return json.dumps(success(result, "success"), ensure_ascii=False, cls=MyEncoder)
+        return json.dumps(
+            success(result, "success"),
+            ensure_ascii=False,
+            cls=MyEncoder,
+        )
     except Exception as e:
         res = failed(str(e), 500, {})
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标签新增 DataLabel
 @bp.route('/data/label/add', methods=['POST'])
 def data_label_add():
     try:
-        # 传入请求参数
         receiver = request.get_json()
-        name_zh = receiver['name_zh']  # 中文名称
-        name_en = translate_and_parse(name_zh)  # 英文名
+        name_zh = receiver['name_zh']
+        name_en = translate_and_parse(name_zh)
         receiver['name_en'] = name_en[0]
         receiver['create_time'] = get_formatted_time()
         create_or_get_node('DataLabel', **receiver)
@@ -169,15 +173,13 @@ def data_label_add():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标签详情 DataLabel
 @bp.route('/data/label/detail', methods=['POST'])
 def data_label_detail():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         nodeid = receiver['id']  # id
 
-        cql = """MATCH (n:DataLabel) where id(n) = $nodeId 
+        cql = """MATCH (n:DataLabel) where id(n) = $nodeId
                   RETURN properties(n) as property"""
         with connect_graph().session() as session:
             result = session.run(cql, nodeId=nodeid).single()
@@ -192,11 +194,9 @@ def data_label_detail():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标签列表展示(分类,名称,时间检索)
 @bp.route('/data/label/list', methods=['POST'])
 def data_label_list():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         page = int(receiver.get('current', 1))
         page_size = int(receiver.get('size', 10))
@@ -205,13 +205,68 @@ def data_label_list():
         category = receiver.get('category', None)
         group = receiver.get('group', None)
 
-        # 计算跳过的记录的数量
         skip_count = (page - 1) * page_size
 
-        data, total = interface.label_list(skip_count, page_size, name_en_filter,
-                                           name_zh_filter, category, group)
+        data, total = interface.label_list(
+            skip_count,
+            page_size,
+            name_en_filter,
+            name_zh_filter,
+            category,
+            group,
+        )
+
+        response_data = {
+            'records': data,
+            'total': total,
+            'size': page_size,
+            'current': page,
+        }
+        res = success(response_data, "success")
+        return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
+    except Exception as e:
+        res = failed(str(e), 500, {})
+        return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
+
+
+@bp.route('/labellist', methods=['POST'])
+def interface_label_list():
+    """获取 DataLabel 列表(支持多条件 category_filter 过滤)"""
+    try:
+        receiver = request.get_json() or {}
+        page = int(receiver.get('current', 1))
+        page_size = int(receiver.get('size', 10))
+        name_en_filter = receiver.get('name_en')
+        name_zh_filter = receiver.get('name_zh')
+        category_filter = receiver.get('category_filter')
+        group_filter = receiver.get('group')
 
-        response_data = {'records': data, 'total': total, 'size': page_size, 'current': page}
+        skip_count = (page - 1) * page_size
+        data, total = interface.label_list(
+            skip_count,
+            page_size,
+            name_en_filter,
+            name_zh_filter,
+            category_filter,
+            group_filter,
+        )
+
+        # 只保留 id, name_zh, name_en 三个字段
+        records = [
+            {
+                'id': item.get('id'),
+                'name_zh': item.get('name_zh'),
+                'name_en': item.get('name_en'),
+            }
+            for item in data
+        ]
+
+        response_data = {
+            'records': records,
+            'total': total,
+            'size': page_size,
+            'current': page,
+        }
         res = success(response_data, "success")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
     except Exception as e:
@@ -219,11 +274,31 @@ def data_label_list():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 24.11.19 数据标签动态识别分组
+@bp.route('/graphall', methods=['POST'])
+def interface_graph_all():
+    """获取完整关系图谱"""
+    try:
+        receiver = request.get_json() or {}
+        domain_id = receiver.get("id")
+        include_meta = receiver.get("meta", True)
+
+        if domain_id is None:
+            return jsonify(failed("节点ID不能为空"))
+
+        try:
+            domain_id = int(domain_id)
+        except (ValueError, TypeError):
+            return jsonify(failed(f"节点ID必须为整数, 收到的是: {domain_id}"))
+
+        graph_data = interface.graph_all(domain_id, include_meta)
+        return jsonify(success(graph_data))
+    except Exception as e:
+        return jsonify(failed("获取图谱失败", error=str(e)))
+
+
 @bp.route('/data/label/dynamic/identify', methods=['POST'])
 def data_label_dynamic_identify():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         name_filter = receiver.get('content', None)
 
@@ -236,11 +311,9 @@ def data_label_dynamic_identify():
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 数据标签的图谱(血缘关系Kinship+影响关系Impact+所有关系all)
 @bp.route('/data/label/graph/all', methods=['POST'])
 def data_label_graph():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         nodeid = receiver['id']
         type = receiver['type']  # kinship/impact/all
@@ -249,30 +322,31 @@ def data_label_graph():
         elif type == 'impact':
             result = interface.label_impact_graph(nodeid)
         else:
-            result = interface.label_kinship_graph(nodeid)  # 对于标签,将all和kinship都视为相同处理
-        return json.dumps(success(result, "success"), ensure_ascii=False, cls=MyEncoder)
+            result = interface.label_kinship_graph(nodeid)
+        return json.dumps(
+            success(result, "success"),
+            ensure_ascii=False,
+            cls=MyEncoder,
+        )
     except Exception as e:
         res = failed(str(e), 500, {})
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
 
 
-# 删除标签、标准、指标间的关系
 @bp.route('/metric/label/standard/delete', methods=['POST'])
 def metric_label_standard_delete():
     try:
-        # 传入请求参数
         receiver = request.get_json()
         sourceid = receiver['sourceid']
         targetid = receiver['targetid']
 
-        # 查询语句,查询两个节点之间的关系
         cql = """
         MATCH (source)-[r]-(target)
         WHERE id(source) = $sourceid AND id(target) = $targetid
         DELETE r
         """
         with connect_graph().session() as session:
-            result = session.run(cql, sourceid=sourceid, targetid=targetid)
+            session.run(cql, sourceid=sourceid, targetid=targetid)
 
         res = success("", "success")
         return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
@@ -283,49 +357,40 @@ def metric_label_standard_delete():
 
 @bp.route('/data/label/delete', methods=['POST'])
 def data_label_delete():
-    """
-    删除数据标签节点
-    
-    请求参数:
-    - id: 节点ID
-    
-    返回:
-    - 删除结果状态信息
-    """
+    """Delete data label node"""
     try:
-        # 获取请求参数
         receiver = request.get_json()
-        node_id = receiver.get('id')
-        
-        # 验证参数
+        node_id = receiver.get('id') if receiver else None
+
         if not node_id:
-            res = failed("节点ID不能为空", 400, {})
-            return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-        
-        # 转换为整数
+            return jsonify(failed("node id is required", 400, {}))
+
         try:
             node_id = int(node_id)
         except (ValueError, TypeError):
-            res = failed("节点ID必须为整数", 400, {})
-            return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-        
-        # 调用核心业务逻辑执行删除
+            return jsonify(failed("node id must be an integer", 400, {}))
+
         delete_result = interface.node_delete(node_id)
-        
-        # 根据删除结果返回响应
-        if delete_result["success"]:
-            res = success({
-                "id": node_id,
-                "message": delete_result["message"]
-            }, "删除成功")
-            return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
+        message = delete_result.get("message") or ""
+
+        if delete_result.get("success"):
+            res = success(
+                {
+                    "id": node_id,
+                    "message": message,
+                },
+                "delete success",
+            )
         else:
-            res = failed(delete_result["message"], 500, {
-                "id": node_id,
-                "message": delete_result["message"]
-            })
-            return json.dumps(res, ensure_ascii=False, cls=MyEncoder)
-            
+            res = failed(
+                message,
+                500,
+                {
+                    "id": node_id,
+                    "message": message,
+                },
+            )
+
+        return jsonify(res)
     except Exception as e:
-        res = failed(f"删除失败: {str(e)}", 500, {})
-        return json.dumps(res, ensure_ascii=False, cls=MyEncoder) 
+        return jsonify(failed(f"delete failed: {str(e)}", 500, {}))

+ 235 - 155
app/api/meta_data/routes.py

@@ -53,31 +53,47 @@ def allowed_file(filename):
 @bp.route('/node/list', methods=['POST'])
 def meta_node_list():
     try:
-        if not request.json:
-            return jsonify(failed("请求数据不能为空"))
-        # 从请求中获取分页参数
-        page = int(request.json.get('current', 1))
-        page_size = int(request.json.get('size', 10))
-        # 获取搜索参数
-        search = request.json.get('search', '')
-        name_en_filter = request.json.get('name_en', None)
-        name_zh_filter = request.json.get('name_zh', None)
-        category_filter = request.json.get('category', None)
-        time_filter = request.json.get('time', None)
-        tag_filter = request.json.get('tag', None)
-        
+        payload = request.get_json() or {}
+        if not isinstance(payload, dict):
+            return jsonify(failed("请求数据格式错误,应为 JSON 对象"))
+
+        def to_int(value, default):
+            try:
+                return int(value)
+            except (TypeError, ValueError):
+                return default
+
+        # 分页参数
+        page = to_int(payload.get('current', 1), 1)
+        page_size = to_int(payload.get('size', 10), 10)
+
+        # 过滤参数
+        name_en_filter = payload.get('name_en') or None
+        name_zh_filter = payload.get('name_zh') or None
+        category_filter = payload.get('category') or None
+        time_filter = payload.get('time') or None
+
+        logger.info(
+            f"[node/list] 过滤参数: name_zh={name_zh_filter}, "
+            f"name_en={name_en_filter}, category={category_filter}"
+        )
+
+        tag_filter = payload.get('tag')
+        if tag_filter is not None and not isinstance(tag_filter, list):
+            tag_filter = None
+
         # 调用核心业务逻辑
         result, total_count = meta_list(
-            page, 
-            page_size, 
-            search, 
-            name_en_filter, 
-            name_zh_filter, 
-            category_filter, 
-            time_filter, 
+            page,
+            page_size,
+            '',
+            name_en_filter,
+            name_zh_filter,
+            category_filter,
+            time_filter,
             tag_filter
         )
-        
+
         # 返回结果
         return jsonify(success({
             "records": result,
@@ -98,12 +114,35 @@ def meta_node_graph():
             return jsonify(failed("请求数据不能为空"))
         # 从请求中获取节点ID
         node_id = request.json.get('nodeId')
-        
+        if node_id is None:
+            return jsonify(failed("nodeId 不能为空"))
+
+        try:
+            node_id_int = int(node_id)
+        except (TypeError, ValueError):
+            return jsonify(failed("nodeId 必须为整数"))
+
         # 调用核心业务逻辑
-        result = meta_kinship_graph(node_id)
-        
-        # 返回结果
-        return jsonify(success(result))
+        graph = meta_kinship_graph(node_id_int)
+        is_dict = isinstance(graph, dict)
+        nodes = graph.get("nodes", []) if is_dict else []
+        relationships = graph.get("relationships", []) if is_dict else []
+
+        # 当前节点属性
+        node_info = next(
+            (n for n in nodes if n.get("id") == node_id_int),
+            {},
+        )
+        # 关联节点(包含属性,便于前端展示名称等)
+        related_nodes = [n for n in nodes if n.get("id") != node_id_int]
+
+        payload = {
+            "node": node_info,
+            "related_nodes": related_nodes,
+            "relationships": relationships,
+        }
+
+        return jsonify(success(payload))
     except Exception as e:
         logger.error(f"获取元数据图谱失败: {str(e)}")
         return jsonify(failed(str(e)))
@@ -117,12 +156,12 @@ def meta_node_delete():
             return jsonify(failed("请求数据不能为空"))
         # 从请求中获取节点ID
         node_id = request.json.get('id')
-        
+
         # 删除节点逻辑
         with neo4j_driver.get_session() as session:
             cypher = "MATCH (n) WHERE id(n) = $node_id DETACH DELETE n"
             session.run(cypher, node_id=int(node_id))
-        
+
         # 返回结果
         return jsonify(success({}))
     except Exception as e:
@@ -138,10 +177,10 @@ def meta_node_edit():
             return jsonify(failed("请求数据不能为空"))
         # 从请求中获取节点ID
         node_id = request.json.get('id')
-        
+
         if not node_id:
             return jsonify(failed("节点ID不能为空"))
-        
+
         # 获取节点
         with neo4j_driver.get_session() as session:
             # 查询节点信息
@@ -151,15 +190,15 @@ def meta_node_edit():
             RETURN n
             """
             result = session.run(cypher, node_id=int(node_id))
-            
+
             node = result.single()
             if not node or not node["n"]:
                 return jsonify(failed("节点不存在"))
-            
+
             # 获取节点数据
             node_data = dict(node["n"])
             node_data["id"] = node["n"].id
-            
+
             # 获取标签信息
             tag_cypher = """
             MATCH (n:DataMeta)-[:LABEL]->(t:DataLabel)
@@ -167,8 +206,16 @@ def meta_node_edit():
             RETURN t
             """
             tag_result = session.run(tag_cypher, node_id=int(node_id))
-            tag = tag_result.single()
-            
+            tags: list[dict] = []
+            for record in tag_result:
+                tag_node = record.get("t")
+                if tag_node:
+                    tags.append({
+                        "id": tag_node.id,
+                        "name_zh": tag_node.get("name_zh", ""),
+                        "name_en": tag_node.get("name_en", ""),
+                    })
+
             # 获取主数据信息
             master_data_cypher = """
             MATCH (n:DataMeta)-[:master_data]->(m:master_data)
@@ -179,7 +226,7 @@ def meta_node_edit():
                 master_data_cypher, node_id=int(node_id)
             )
             master_data = master_data_result.single()
-            
+
             # 构建返回数据
             response_data = [{
                 "master_data": (
@@ -192,26 +239,16 @@ def meta_node_edit():
                 "update_time": node_data.get("update_time", ""),
                 "status": bool(node_data.get("status", True)),
                 "data_type": node_data.get("data_type", ""),
-                "tag": {
-                    "name_zh": (
-                        tag["t"].get("name_zh", "")
-                        if tag and tag["t"] else None
-                    ),
-                    "name_en": (
-                        tag["t"].get("name_en", "")
-                        if tag and tag["t"] else None
-                    ),
-                    "id": tag["t"].id if tag and tag["t"] else None
-                },
+                "tag": tags,
                 "affiliation": node_data.get("affiliation"),
                 "category": node_data.get("category"),
                 "alias": node_data.get("alias"),
                 "describe": node_data.get("describe")
             }]
-            
+
             logger.info(f"成功获取元数据节点: ID={node_data['id']}")
             return jsonify(success(response_data))
-            
+
     except Exception as e:
         logger.error(f"获取元数据节点失败: {str(e)}")
         return jsonify(failed(str(e)))
@@ -222,19 +259,19 @@ def meta_node_edit():
 def meta_check():
     """
     检查元数据中文名是否已存在
-    
+
     请求参数:
     - name_zh: 元数据中文名(URL参数)
-    
+
     返回:
     - exists: true/false 表示是否存在
     """
     try:
         name_zh = request.args.get('name_zh')
-        
+
         if not name_zh:
             return jsonify(failed("缺少name_zh参数"))
-        
+
         # 查询数据库检查是否存在
         with neo4j_driver.get_session() as session:
             cypher = """
@@ -243,7 +280,7 @@ def meta_check():
             """
             result = session.run(cypher, name_zh=name_zh)
             record = result.single()
-            
+
             if record:
                 exists = record["exists"]
                 logger.info(f"检查元数据 '{name_zh}': {'存在' if exists else '不存在'}")
@@ -256,7 +293,7 @@ def meta_check():
                     "exists": False,
                     "name_zh": name_zh
                 }, "查询成功"))
-                
+
     except Exception as e:
         logger.error(f"检查元数据失败: {str(e)}")
         return jsonify(failed(f"检查失败: {str(e)}"))
@@ -277,13 +314,13 @@ def meta_node_add():
         node_desc = request.json.get('describe')
         node_status = bool(request.json.get('status', True))
         node_name_en = request.json.get('name_en')
-        
+
         if not node_name_zh:
             return jsonify(failed("节点名称不能为空"))
-            
+
         if not node_type:
             return jsonify(failed("节点类型不能为空"))
-        
+
         # 创建节点
         with neo4j_driver.get_session() as session:
             cypher = """
@@ -310,7 +347,7 @@ def meta_node_add():
             """
             create_time = update_time = get_formatted_time()
             result = session.run(
-                cypher, 
+                cypher,
                 name_zh=node_name_zh,
                 data_type=node_type,
                 category=node_category,
@@ -322,25 +359,62 @@ def meta_node_add():
                 status=node_status,
                 name_en=node_name_en
             )
-            
+
             node = result.single()
             if node and node["n"]:
                 node_data = dict(node["n"])
                 node_data["id"] = node["n"].id
-                
-                # 如果提供了标签ID,创建标签关系
+
+                # 如果提供了标签列表,创建标签关系
+                tag_nodes = []
                 if node_tag:
-                    tag_cypher = """
-                    MATCH (n:DataMeta), (t:DataLabel)
-                    WHERE id(n) = $node_id AND id(t) = $tag_id
-                    MERGE (n)-[r:LABEL]->(t)
-                    RETURN r
-                    """
-                    session.run(
-                        tag_cypher,
-                        node_id=node["n"].id,
-                        tag_id=int(node_tag)
+                    tag_items = (
+                        node_tag if isinstance(node_tag, list) else [node_tag]
                     )
+                    for tag_item in tag_items:
+                        tag_id = (
+                            tag_item.get("id")
+                            if isinstance(tag_item, dict)
+                            else tag_item
+                        )
+                        if tag_id is None:
+                            logger.warning(f"标签ID无效: {tag_item}")
+                            continue
+                        try:
+                            tag_id = int(tag_id)
+                        except (TypeError, ValueError):
+                            logger.warning(f"标签ID无效: {tag_item}")
+                            continue
+
+                        # 获取标签节点信息
+                        tag_fetch = session.run(
+                            "MATCH (t:DataLabel) "
+                            "WHERE id(t) = $tag_id RETURN t",
+                            tag_id=tag_id,
+                        ).single()
+                        if not tag_fetch or not tag_fetch.get("t"):
+                            logger.warning(f"未找到标签节点: {tag_id}")
+                            continue
+                        tag_node = tag_fetch["t"]
+                        tag_nodes.append({
+                            "id": tag_node.id,
+                            "name_zh": tag_node.get("name_zh", ""),
+                            "name_en": tag_node.get("name_en", ""),
+                        })
+
+                        tag_cypher = """
+                        MATCH (n:DataMeta), (t:DataLabel)
+                        WHERE id(n) = $node_id AND id(t) = $tag_id
+                        MERGE (n)-[r:LABEL]->(t)
+                        RETURN r
+                        """
+                        session.run(
+                            tag_cypher,
+                            node_id=node["n"].id,
+                            tag_id=tag_id
+                        )
+
+                node_data["tag"] = tag_nodes
 
                 logger.info(
                     f"成功创建或更新元数据节点: "
@@ -362,17 +436,17 @@ def search_metadata_route():
         keyword = request.args.get('keyword', '')
         if not keyword:
             return jsonify(success([]))
-            
+
         cypher = """
-        MATCH (n:DataMeta) 
+        MATCH (n:DataMeta)
         WHERE n.name_zh CONTAINS $keyword
         RETURN n LIMIT 100
         """
-        
+
         with neo4j_driver.get_session() as session:
             result = session.run(cypher, keyword=keyword)
             metadata_list = [dict(record["n"]) for record in result]
-            
+
         return jsonify(success(metadata_list))
     except Exception as e:
         logger.error(f"搜索元数据失败: {str(e)}")
@@ -401,7 +475,7 @@ def full_text_query():
             """
 
             result = session.run(cypher, term=search_term)
-            
+
             # 处理查询结果
             search_results = []
             for record in result:
@@ -409,7 +483,7 @@ def full_text_query():
                 node_data["id"] = record["node"].id
                 node_data["score"] = record["score"]
                 search_results.append(node_data)
-                
+
             return jsonify(success(search_results))
     except Exception as e:
         logger.error(f"全文检索查询失败: {str(e)}")
@@ -426,28 +500,28 @@ def unstructure_text_query():
         node_id = request.json.get('id')
         if not node_id:
             return jsonify(failed("节点ID不能为空"))
-            
+
         # 获取节点信息
         node_data = handle_id_unstructured(node_id)
         if not node_data:
             return jsonify(failed("节点不存在"))
-            
+
         # 获取对象路径
         object_name = node_data.get('url')
         if not object_name:
             return jsonify(failed("文档路径不存在"))
-            
+
         # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
         bucket_name = config['MINIO_BUCKET']
-            
+
         # 从MinIO获取文件内容
         file_content = get_file_content(minio_client, bucket_name, object_name)
-        
+
         # 解析文本内容
         parsed_data = parse_text(file_content)
-        
+
         # 返回结果
         result = {
             "node": node_data,
@@ -457,7 +531,7 @@ def unstructure_text_query():
                 if len(file_content) > 1000 else file_content
             )
         }
-        
+
         return jsonify(success(result))
     except Exception as e:
         logger.error(f"非结构化文本查询失败: {str(e)}")
@@ -471,7 +545,7 @@ def upload_file():
         # 检查请求中是否有文件
         if 'file' not in request.files:
             return jsonify(failed("没有找到上传的文件"))
-            
+
         file = request.files['file']
 
         # 检查文件名
@@ -496,26 +570,26 @@ def upload_file():
 
         # 提取文件名(不包含扩展名)
         filename_without_ext = filename.rsplit('.', 1)[0]
-        
+
         # 生成紧凑的时间戳 (yyyyMMddHHmmss)
         import time
         timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
-        
+
         # 生成唯一文件名
         object_name = (
             f"{config['PREFIX']}/"
             f"{filename_without_ext}_{timestamp}.{file_type}"
         )
-        
+
         # 上传文件
         minio_client.put_object(
-            config['MINIO_BUCKET'], 
+            config['MINIO_BUCKET'],
             object_name,
             io.BytesIO(file_content),
             file_size,
             content_type=f"application/{file_type}"
         )
-        
+
         # 返回结果
         return jsonify(success({
             "filename": file.filename,
@@ -538,21 +612,21 @@ def upload_file_display():
         object_name = request.json.get('url')
         if not object_name:
             return jsonify(failed("文件路径不能为空"))
-            
+
         # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
-            
+
         # 获取文件内容
         response = minio_client.get_object(config['MINIO_BUCKET'], object_name)
         file_data = response.read()
-        
+
         # 获取文件名
         file_name = object_name.split('/')[-1]
-        
+
         # 确定文件类型
         file_extension = file_name.split('.')[-1].lower()
-        
+
         # 为不同文件类型设置合适的MIME类型
         mime_types = {
             'pdf': 'application/pdf',
@@ -569,11 +643,11 @@ def upload_file_display():
             'txt': 'text/plain',
             'csv': 'text/csv'
         }
-        
+
         content_type = mime_types.get(
             file_extension, 'application/octet-stream'
         )
-        
+
         # 返回结果
         return jsonify(success({
             "filename": file_name,
@@ -602,18 +676,18 @@ def download_file():
         object_name = request.args.get('url')
         if not object_name:
             return jsonify(failed("文件路径不能为空"))
-            
+
         # URL解码,处理特殊字符
         import urllib.parse
         object_name = urllib.parse.unquote(object_name)
-        
+
         # 记录下载请求信息,便于调试
         logger.info(f"下载文件请求: {object_name}")
-        
+
         # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
-        
+
         # 获取文件
         try:
             response = minio_client.get_object(
@@ -623,13 +697,13 @@ def download_file():
         except S3Error as e:
             logger.error(f"MinIO获取文件失败: {str(e)}")
             return jsonify(failed(f"文件获取失败: {str(e)}"))
-        
+
         # 获取文件名,并处理特殊字符
         file_name = object_name.split('/')[-1]
-        
+
         # 直接从内存返回文件,不创建临时文件
         file_stream = io.BytesIO(file_data)
-        
+
         # 返回文件
         return send_file(
             file_stream,
@@ -655,13 +729,13 @@ def text_resource_translate():
         # 获取参数
         name_zh = request.json.get('name_zh', '')
         keyword = request.json.get('keyword', '')
-        
+
         if not name_zh:
             return jsonify(failed("名称不能为空"))
-            
+
         # 调用资源处理逻辑
         result = text_resource_solve(None, name_zh, keyword)
-        
+
         return jsonify(success(result))
     except Exception as e:
         logger.error(f"文本资源翻译失败: {str(e)}")
@@ -680,10 +754,10 @@ def text_resource_node():
         keywords = request.json.get('keywords', [])
         keywords_en = request.json.get('keywords_en', [])
         object_name = request.json.get('url', '')
-        
+
         if not name_zh or not name_en or not object_name:
             return jsonify(failed("参数不完整"))
-            
+
         # 创建节点
         with neo4j_driver.get_session() as session:
             # 创建资源节点
@@ -699,7 +773,7 @@ def text_resource_node():
             })
             RETURN n
             """
-            
+
             create_time = update_time = get_formatted_time()
             result = session.run(
                 cypher,
@@ -716,7 +790,7 @@ def text_resource_node():
             if not record:
                 return jsonify(failed("创建节点失败"))
             node = record["n"]
-            
+
             # 为每个关键词创建标签节点并关联
             for i, keyword in enumerate(keywords):
                 if keyword:
@@ -727,7 +801,7 @@ def text_resource_node():
                                   t.create_time = $create_time
                     RETURN t
                     """
-                    
+
                     tag_result = session.run(
                         tag_cypher,
                         name_zh=keyword,
@@ -739,7 +813,7 @@ def text_resource_node():
                     if not tag_record:
                         continue
                     tag_node = tag_record["t"]
-                    
+
                     # 创建关系
                     rel_cypher = """
                     MATCH (n), (t)
@@ -747,13 +821,13 @@ def text_resource_node():
                     CREATE (n)-[r:HAS_TAG]->(t)
                     RETURN r
                     """
-                    
+
                     session.run(
                         rel_cypher,
                         node_id=node.id,
                         tag_id=tag_node.id
                     )
-            
+
             # 返回创建的节点
             return jsonify(success(dict(node)))
     except Exception as e:
@@ -771,15 +845,15 @@ def processing_unstructured_data():
         node_id = request.json.get('id')
         if not node_id:
             return jsonify(failed("节点ID不能为空"))
-            
+
         # 获取 MinIO 配置
         minio_client = get_minio_client()
         config = get_minio_config()
         prefix = config['PREFIX']
-            
+
         # 调用处理逻辑
         result = solve_unstructured_data(node_id, minio_client, prefix)
-        
+
         if result:
             return jsonify(success({"message": "处理成功"}))
         else:
@@ -805,7 +879,7 @@ def create_text_graph():
 
         # 创建图谱
         result = handle_txt_graph(node_id, entity_zh, entity_en)
-        
+
         if result:
             return jsonify(success({"message": "图谱创建成功"}))
         else:
@@ -835,16 +909,16 @@ def meta_node_update():
             return jsonify(failed("请求数据不能为空"))
         # 从请求中获取节点ID和更新数据
         node_id = request.json.get('id')
-        
+
         if not node_id:
             return jsonify(failed("节点ID不能为空"))
-        
+
         # 验证并转换节点ID为整数
         try:
             node_id = int(node_id)
         except (ValueError, TypeError):
             return jsonify(failed(f"节点ID必须为整数,当前值: {node_id}"))
-        
+
         # 更新节点
         with neo4j_driver.get_session() as session:
             # 检查节点是否存在并获取当前值
@@ -855,7 +929,7 @@ def meta_node_update():
             """
             result = session.run(check_cypher, node_id=node_id)
             node = result.single()
-            
+
             if not node or not node["n"]:
                 return jsonify(failed("节点不存在"))
 
@@ -865,13 +939,13 @@ def meta_node_update():
             WHERE id(n) = $node_id
             SET n.updateTime = $update_time
             """
-            
+
             # 准备更新参数
             update_params = {
                 'node_id': node_id,
                 'update_time': get_formatted_time()
             }
-            
+
             # 处理每个可能的更新字段
             fields_to_update = {
                 'name_zh': request.json.get('name_zh'),
@@ -883,7 +957,7 @@ def meta_node_update():
                 'status': request.json.get('status'),
                 'name_en': request.json.get('name_en')
             }
-            
+
             # 只更新提供了新值的字段
             for field, new_value in fields_to_update.items():
                 if new_value is not None:
@@ -893,20 +967,20 @@ def meta_node_update():
                     else:
                         update_cypher += f", n.{field} = ${field}\n"
                     update_params[field] = new_value
-            
+
             update_cypher += "RETURN n"
-            
+
             result = session.run(
                 update_cypher,  # type: ignore[arg-type]
                 **update_params
             )
-            
+
             updated_node = result.single()
             if updated_node and updated_node["n"]:
                 node_data = dict(updated_node["n"])
                 node_data["id"] = updated_node["n"].id
-                
-                # 如果更新了标签,处理标签关系
+
+                # 如果更新了标签,处理标签关系(支持列表)
                 tag = request.json.get('tag')
                 if tag is not None:
                     # 先删除现有标签关系
@@ -916,35 +990,41 @@ def meta_node_update():
                     DELETE r
                     """
                     session.run(delete_tag_cypher, node_id=node_id)
-                    
-                    # 创建新的标签关系
-                    is_valid_tag = (
-                        tag and isinstance(tag, dict)
-                        and 'id' in tag and tag['id']
-                    )
-                    if is_valid_tag:
+
+                    tag_items = tag if isinstance(tag, list) else [tag]
+                    for tag_item in tag_items:
+                        tag_id = (
+                            tag_item.get('id')
+                            if isinstance(tag_item, dict)
+                            else tag_item
+                        )
+                        if tag_id is None:
+                            logger.warning(f"标签ID无效: {tag_item}")
+                            continue
                         try:
-                            tag_id = int(tag['id'])
-                            create_tag_cypher = """
-                            MATCH (n:DataMeta), (t:DataLabel)
-                            WHERE id(n) = $node_id AND id(t) = $tag_id
-                            MERGE (n)-[r:LABEL]->(t)
-                            RETURN r
-                            """
-                            session.run(
-                                create_tag_cypher,
-                                node_id=node_id,
-                                tag_id=tag_id
-                            )
+                            tag_id = int(tag_id)
                         except (ValueError, TypeError):
-                            logger.warning(f"标签ID无效: {tag.get('id')}")
-                
+                            logger.warning(f"标签ID无效: {tag_item}")
+                            continue
+
+                        create_tag_cypher = """
+                        MATCH (n:DataMeta), (t:DataLabel)
+                        WHERE id(n) = $node_id AND id(t) = $tag_id
+                        MERGE (n)-[r:LABEL]->(t)
+                        RETURN r
+                        """
+                        session.run(
+                            create_tag_cypher,
+                            node_id=node_id,
+                            tag_id=tag_id
+                        )
+
                 logger.info(f"成功更新元数据节点: ID={node_data['id']}")
                 return jsonify(success(node_data))
             else:
                 logger.error(f"更新元数据节点失败: ID={node_id}")
                 return jsonify(failed("更新元数据节点失败"))
-                
+
     except Exception as e:
         logger.error(f"更新元数据失败: {str(e)}")
-        return jsonify(failed(str(e))) 
+        return jsonify(failed(str(e)))

文件差异内容过多而无法显示
+ 365 - 316
app/core/business_domain/business_domain.py


文件差异内容过多而无法显示
+ 420 - 211
app/core/data_flow/dataflows.py


+ 524 - 137
app/core/data_interface/interface.py

@@ -9,17 +9,69 @@
 """
 
 import logging
+import re
 from app.core.graph.graph_operations import connect_graph
+from app.services.neo4j_driver import neo4j_driver
 
 # 配置logger
 logger = logging.getLogger(__name__)
 
+
+def _build_category_filter_conditions(category_filter, params):
+    """
+    将 category_filter 转换为 Cypher 查询条件列表。
+    支持:
+    - 字典: {field: value, ...}
+    - 列表: [{"field": "...", "value": "..."}, {"category": "xxx"}]
+    - 字符串: 兼容旧用法,等同于按 category 字段过滤
+    """
+    conditions = []
+    param_index = 0
+
+    def add_condition(field, value):
+        nonlocal param_index
+        if value is None:
+            return
+        if not isinstance(field, str):
+            return
+        if not re.match(r"^[A-Za-z_][A-Za-z0-9_]*$", field):
+            logger.warning(f"忽略非法属性字段: {field}")
+            return
+        param_key = f"category_filter_{param_index}"
+        param_index += 1
+        conditions.append(f"n.{field} CONTAINS ${param_key}")
+        params[param_key] = value
+
+    if isinstance(category_filter, dict):
+        for field, value in category_filter.items():
+            add_condition(field, value)
+    elif isinstance(category_filter, list):
+        for item in category_filter:
+            if not isinstance(item, dict):
+                continue
+            if "field" in item and "value" in item:
+                add_condition(item.get("field"), item.get("value"))
+            elif len(item) == 1:
+                field, value = next(iter(item.items()))
+                add_condition(field, value)
+    elif category_filter:
+        add_condition("category", category_filter)
+
+    return conditions
+
+
 # 数据标准列表展示
-def standard_list(skip_count, page_size, name_en_filter=None,
-                  name_zh_filter=None, category_filter=None, create_time_filter=None):
+def standard_list(
+    skip_count,
+    page_size,
+    name_en_filter=None,
+    name_zh_filter=None,
+    category_filter=None,
+    create_time_filter=None,
+):
     """
     获取数据标准列表
-    
+
     Args:
         skip_count: 跳过的记录数量
         page_size: 每页记录数量
@@ -27,7 +79,7 @@ def standard_list(skip_count, page_size, name_en_filter=None,
         name_zh_filter: 名称过滤条件
         category_filter: 分类过滤条件
         create_time_filter: 时间过滤条件
-        
+
     Returns:
         tuple: (数据列表, 总记录数)
     """
@@ -57,15 +109,18 @@ def standard_list(skip_count, page_size, name_en_filter=None,
     cql = f"""
     MATCH (n:data_standard)
     WHERE {where_str}
-    RETURN properties(n) as properties,n.create_time as create_time,id(n) as nodeid,
-           size([(n)<-[]-() | 1]) + size([(n)-[]->() | 1]) as relationship_count
+    RETURN
+        properties(n) as properties,
+        n.create_time as create_time,
+        id(n) as nodeid,
+        size([(n)<-[]-() | 1]) + size([(n)-[]->() | 1]) as relationship_count
     ORDER BY create_time desc
     SKIP $skip_count
     LIMIT $page_size
     """
     params['skip_count'] = skip_count
     params['page_size'] = page_size
-    
+
     # 修复:使用正确的session方式执行查询
     driver = None
     try:
@@ -87,10 +142,14 @@ def standard_list(skip_count, page_size, name_en_filter=None,
                 data.append(properties)
 
             # 获取总量
-            total_query = f"MATCH (n:data_standard) WHERE {where_str} RETURN COUNT(n) AS total"
-            total_result = session.run(total_query, **params).single()["total"]
-            
-            return data, total_result
+            total_query = (
+                f"MATCH (n:data_standard) WHERE {where_str} "
+                "RETURN COUNT(n) AS total"
+            )
+            total_record = session.run(total_query, **params).single()
+            total = total_record["total"] if total_record else 0
+
+            return data, total
     except (ConnectionError, ValueError) as e:
         logger.error(f"Neo4j数据库连接失败: {str(e)}")
         return [], 0
@@ -103,10 +162,10 @@ def standard_list(skip_count, page_size, name_en_filter=None,
 def standard_kinship_graph(nodeid):
     """
     生成数据标准的血缘关系图谱
-    
+
     Args:
         nodeid: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -116,19 +175,32 @@ def standard_kinship_graph(nodeid):
     WHERE id(da)=$nodeId
     OPTIONAL MATCH(a:DataResource)-[:clean_resource]-(da)
     OPTIONAL MATCH(b:DataModel)-[:clean_model]-(da)
-    WITH 
-        collect({id:toString(id(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
-        collect({id:toString(id(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
-        collect({id:toString(id(da)),text:da.name,type:split(labels(da)[0],'_')[1]}) as nodes,da,
+    WITH
+        collect({
+            id:toString(id(a)),
+            text:a.name,
+            type:split(labels(a)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(b)),
+            text:b.name,
+            type:split(labels(b)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(da)),
+            text:da.name,
+            type:split(labels(da)[0],'_')[1]
+        }) as nodes,
+        da,
         collect({from:toString(id(a)),to:toString(id(da)),text:'标准'})+
-        collect({from:toString(id(b)),to:toString(id(da)),text:'标准'})as lines
-    WITH  
+        collect({from:toString(id(b)),to:toString(id(da)),text:'标准'}) as lines
+    WITH
         toString(id(da)) as rootId,
         apoc.coll.toSet(lines) as lines,
         apoc.coll.toSet(nodes) as nodes
     RETURN nodes,lines,rootId
     """
-    
+
     driver = None
     try:
         driver = connect_graph()
@@ -137,8 +209,14 @@ def standard_kinship_graph(nodeid):
             res = {}
             for item in result:
                 res = {
-                    "nodes": [record for record in item['nodes'] if record['id']],
-                    "lines": [record for record in item['lines'] if record['from'] and record['to']],
+                    "nodes": [
+                        record for record in item['nodes'] if record['id']
+                    ],
+                    "lines": [
+                        record
+                        for record in item['lines']
+                        if record['from'] and record['to']
+                    ],
                     "rootId": item['rootId']
                 }
             return res
@@ -154,10 +232,10 @@ def standard_kinship_graph(nodeid):
 def standard_impact_graph(nodeid):
     """
     生成数据标准的影响关系图谱
-    
+
     Args:
         nodeid: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -167,19 +245,32 @@ def standard_impact_graph(nodeid):
         WHERE id(da)=$nodeId
         OPTIONAL MATCH(da)-[:clean_model]-(m1:DataMeta)-[:clean_model]-(da)
         OPTIONAL MATCH(da)-[:clean_model]-(m2:DataMeta)-[:clean_model]-(da)
-        WITH 
-            collect({id:toString(id(da)),text:da.name,type:split(labels(da)[0],'_')[1]})+
+        WITH
+            collect({
+                id:toString(id(da)),
+                text:da.name,
+                type:split(labels(da)[0],'_')[1]
+            })+
             collect({id:toString(id(m1)),text:m1.name})+
-            collect({id:toString(id(m2)),text:m2.name})as nodes,da,
-            collect({from:toString(id(da)),to:toString(id(m1)),text:'标准清洗'})+
-            collect({from:toString(id(da)),to:toString(id(m2)),text:'标准清洗'})as lines
-        WITH  
+            collect({id:toString(id(m2)),text:m2.name}) as nodes,
+            da,
+            collect({
+                from:toString(id(da)),
+                to:toString(id(m1)),
+                text:'标准清洗'
+            })+
+            collect({
+                from:toString(id(da)),
+                to:toString(id(m2)),
+                text:'标准清洗'
+            }) as lines
+        WITH
             toString(id(da)) as rootId,
             apoc.coll.toSet(lines) as lines,
             apoc.coll.toSet(nodes) as nodes
         RETURN nodes,lines,rootId
         """
-    
+
     driver = None
     try:
         driver = connect_graph()
@@ -188,8 +279,14 @@ def standard_impact_graph(nodeid):
             res = {}
             for item in result:
                 res = {
-                    "nodes": [record for record in item['nodes'] if record['id']],
-                    "lines": [record for record in item['lines'] if record['from'] and record['to']],
+                    "nodes": [
+                        record for record in item['nodes'] if record['id']
+                    ],
+                    "lines": [
+                        record
+                        for record in item['lines']
+                        if record['from'] and record['to']
+                    ],
                     "rootId": item['rootId']
                 }
             return res
@@ -205,10 +302,10 @@ def standard_impact_graph(nodeid):
 def standard_all_graph(nodeid):
     """
     生成数据标准的所有关系图谱
-    
+
     Args:
         nodeid: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -220,17 +317,38 @@ def standard_all_graph(nodeid):
     OPTIONAL MATCH(b:DataModel)-[:clean_model]-(da)
     OPTIONAL MATCH(da)-[:clean_model]-(m1:DataMeta)-[:clean_model]-(da)
     OPTIONAL MATCH(da)-[:clean_model]-(m2:DataMeta)-[:clean_model]-(da)
-    WITH 
-        collect({id:toString(id(a)),text:a.name,type:split(labels(a)[0],'_')[1]})+
-        collect({id:toString(id(b)),text:b.name,type:split(labels(b)[0],'_')[1]})+
-        collect({id:toString(id(da)),text:da.name,type:split(labels(da)[0],'_')[1]})+
+    WITH
+        collect({
+            id:toString(id(a)),
+            text:a.name,
+            type:split(labels(a)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(b)),
+            text:b.name,
+            type:split(labels(b)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(da)),
+            text:da.name,
+            type:split(labels(da)[0],'_')[1]
+        })+
         collect({id:toString(id(m1)),text:m1.name})+
-        collect({id:toString(id(m2)),text:m2.name})as nodes,da,
+        collect({id:toString(id(m2)),text:m2.name}) as nodes,
+        da,
         collect({from:toString(id(a)),to:toString(id(da)),text:'标准'})+
         collect({from:toString(id(b)),to:toString(id(da)),text:'标准'})+
-        collect({from:toString(id(da)),to:toString(id(m1)),text:'标准清洗'})+
-        collect({from:toString(id(da)),to:toString(id(m2)),text:'标准清洗'})as lines
-    WITH  
+        collect({
+            from:toString(id(da)),
+            to:toString(id(m1)),
+            text:'标准清洗'
+        })+
+        collect({
+            from:toString(id(da)),
+            to:toString(id(m2)),
+            text:'标准清洗'
+        }) as lines
+    WITH
         toString(id(da)) as rootId,
         apoc.coll.toSet(lines) as lines,
         apoc.coll.toSet(nodes) as nodes
@@ -244,8 +362,14 @@ def standard_all_graph(nodeid):
             res = {}
             for item in result:
                 res = {
-                    "nodes": [record for record in item['nodes'] if record['id']],
-                    "lines": [record for record in item['lines'] if record['from'] and record['to']],
+                    "nodes": [
+                        record for record in item['nodes'] if record['id']
+                    ],
+                    "lines": [
+                        record
+                        for record in item['lines']
+                        if record['from'] and record['to']
+                    ],
                     "rootId": item['rootId']
                 }
             return res
@@ -258,11 +382,17 @@ def standard_all_graph(nodeid):
 
 
 # 数据标签列表展示
-def label_list(skip_count, page_size, name_en_filter=None,
-               name_zh_filter=None, category_filter=None, group_filter=None):
+def label_list(
+    skip_count,
+    page_size,
+    name_en_filter=None,
+    name_zh_filter=None,
+    category_filter=None,
+    group_filter=None,
+):
     """
     获取数据标签列表
-    
+
     Args:
         skip_count: 跳过的记录数量
         page_size: 每页记录数量
@@ -270,7 +400,7 @@ def label_list(skip_count, page_size, name_en_filter=None,
         name_zh_filter: 名称过滤条件
         category_filter: 分类过滤条件
         group_filter: 分组过滤条件
-        
+
     Returns:
         tuple: (数据列表, 总记录数)
     """
@@ -285,13 +415,14 @@ def label_list(skip_count, page_size, name_en_filter=None,
     if name_en_filter:
         where_clause.append("n.name_en CONTAINS $name_en_filter")
         params['name_en_filter'] = name_en_filter
-    if category_filter:
-        where_clause.append("n.category CONTAINS $category_filter")
-        params['category_filter'] = category_filter
+    where_clause.extend(
+        _build_category_filter_conditions(category_filter, params)
+    )
     if group_filter:
-        where_clause.append(f"n.group CONTAINS $group_filter")
+        where_clause.append("n.group CONTAINS $group_filter")
         params['group_filter'] = group_filter
-    else:
+
+    if not where_clause:
         where_clause.append("TRUE")
 
     where_str = " AND ".join(where_clause)
@@ -300,19 +431,38 @@ def label_list(skip_count, page_size, name_en_filter=None,
     cql = f"""
     MATCH (n:DataLabel)
     WHERE {where_str}
-    WITH n, properties(n) as properties, n.create_time as create_time, id(n) as nodeid
+    WITH
+        n,
+        properties(n) as properties,
+        n.create_time as create_time,
+        id(n) as nodeid
     OPTIONAL MATCH (n)<-[r]-()
-    WITH n, properties, create_time, nodeid, count(r) as incoming
+    WITH
+        n,
+        properties,
+        create_time,
+        nodeid,
+        count(r) as incoming
     OPTIONAL MATCH (n)-[r]->()
-    WITH n, properties, create_time, nodeid, incoming, count(r) as outgoing
-    RETURN properties, create_time, nodeid, incoming + outgoing as relationship_count
+    WITH
+        n,
+        properties,
+        create_time,
+        nodeid,
+        incoming,
+        count(r) as outgoing
+    RETURN
+        properties,
+        create_time,
+        nodeid,
+        incoming + outgoing as relationship_count
     ORDER BY create_time desc
     SKIP $skip_count
     LIMIT $page_size
     """
     params['skip_count'] = skip_count
     params['page_size'] = page_size
-    
+
     driver = None
     try:
         driver = connect_graph()
@@ -332,10 +482,14 @@ def label_list(skip_count, page_size, name_en_filter=None,
                 data.append(properties)
 
             # 获取总量
-            total_query = f"MATCH (n:DataLabel) WHERE {where_str} RETURN COUNT(n) AS total"
-            total_result = session.run(total_query, **params).single()["total"]
-            
-            return data, total_result
+            total_query = (
+                f"MATCH (n:DataLabel) WHERE {where_str} "
+                "RETURN COUNT(n) AS total"
+            )
+            total_record = session.run(total_query, **params).single()
+            total = total_record["total"] if total_record else 0
+
+            return data, total
     except (ConnectionError, ValueError) as e:
         logger.error(f"Neo4j数据库连接失败: {str(e)}")
         return [], 0
@@ -348,10 +502,10 @@ def label_list(skip_count, page_size, name_en_filter=None,
 def id_label_graph(id):
     """
     根据ID生成数据标签图谱
-    
+
     Args:
         id: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -359,13 +513,27 @@ def id_label_graph(id):
     MATCH (n:DataLabel)
     WHERE id(n) = $nodeId
     OPTIONAL MATCH (a)-[:LABEL]-(n)
-    WITH 
-       collect({from: toString(id(a)), to: toString(id(n)), text: "标签"}) AS line1,
-       collect({id: toString(id(n)), text: n.name_zh, type:"label"}) AS node1,
-       collect({id: toString(id(a)), text: a.name_zh, type: split(labels(a)[0], '_')[1]}) AS node2, n
-    WITH apoc.coll.toSet(line1) AS lines,
-                 apoc.coll.toSet(node1 + node2) AS nodes,
-                 toString(id(n)) AS res
+    WITH
+       collect({
+           from: toString(id(a)),
+           to: toString(id(n)),
+           text: "标签"
+       }) AS line1,
+       collect({
+           id: toString(id(n)),
+           text: n.name_zh,
+           type:"label"
+       }) AS node1,
+       collect({
+           id: toString(id(a)),
+           text: a.name_zh,
+           type: split(labels(a)[0], '_')[1]
+       }) AS node2,
+       n
+    WITH
+        apoc.coll.toSet(line1) AS lines,
+        apoc.coll.toSet(node1 + node2) AS nodes,
+        toString(id(n)) AS res
     RETURN lines, nodes, res
     """
     driver = None
@@ -376,8 +544,14 @@ def id_label_graph(id):
             res = {}
             for item in result:
                 res = {
-                    "nodes": [record for record in item['nodes'] if record['id']],
-                    "lines": [record for record in item['lines'] if record['from'] and record['to']],
+                    "nodes": [
+                        record for record in item['nodes'] if record['id']
+                    ],
+                    "lines": [
+                        record
+                        for record in item['lines']
+                        if record['from'] and record['to']
+                    ],
                     "rootId": item['res'],
                 }
             return res
@@ -393,10 +567,10 @@ def id_label_graph(id):
 def label_kinship_graph(nodeid):
     """
     生成数据标签的血缘关系图谱
-    
+
     Args:
         nodeid: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -408,20 +582,41 @@ def label_kinship_graph(nodeid):
     OPTIONAL MATCH(b:DataModel)-[:LABEL]-(la)
     OPTIONAL MATCH(meta:DataMeta)-[:LABEL]-(la)
     OPTIONAL MATCH(d:data_standard)-[:LABEL]-(la)
-          OPTIONAL MATCH(e:DataMetric)-[:LABEL]-(la)
-    WITH 
-        collect({id:toString(id(a)),text:a.name_zh,type:split(labels(a)[0],'_')[1]})+
-        collect({id:toString(id(b)),text:b.name_zh,type:split(labels(b)[0],'_')[1]})+
-        collect({id:toString(id(d)),text:d.name_zh,type:split(labels(d)[0],'_')[1]})+
-        collect({id:toString(id(e)),text:e.name_zh,type:split(labels(e)[0],'_')[1]})+
-        collect({id:toString(id(la)),text:la.name_zh,type:split(labels(la)[0],'_')[1]})+
-        collect({id:toString(id(meta)),text:meta.name_zh}) as nodes,la,
+    OPTIONAL MATCH(e:DataMetric)-[:LABEL]-(la)
+    WITH
+        collect({
+            id:toString(id(a)),
+            text:a.name_zh,
+            type:split(labels(a)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(b)),
+            text:b.name_zh,
+            type:split(labels(b)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(d)),
+            text:d.name_zh,
+            type:split(labels(d)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(e)),
+            text:e.name_zh,
+            type:split(labels(e)[0],'_')[1]
+        })+
+        collect({
+            id:toString(id(la)),
+            text:la.name_zh,
+            type:split(labels(la)[0],'_')[1]
+        })+
+        collect({id:toString(id(meta)),text:meta.name_zh}) as nodes,
+        la,
         collect({from:toString(id(a)),to:toString(id(la)),text:'标签'})+
         collect({from:toString(id(b)),to:toString(id(la)),text:'标签'})+
         collect({from:toString(id(meta)),to:toString(id(la)),text:'标签'})+
         collect({from:toString(id(d)),to:toString(id(la)),text:'标签'})+
-        collect({from:toString(id(e)),to:toString(id(la)),text:'标签'})as lines
-    WITH  
+        collect({from:toString(id(e)),to:toString(id(la)),text:'标签'}) as lines
+    WITH
         toString(id(la)) as rootId,
         apoc.coll.toSet(lines) as lines,
         apoc.coll.toSet(nodes) as nodes
@@ -435,8 +630,14 @@ def label_kinship_graph(nodeid):
             res = {}
             for item in result:
                 res = {
-                    "nodes": [record for record in item['nodes'] if record['id']],
-                    "lines": [record for record in item['lines'] if record['from'] and record['to']],
+                    "nodes": [
+                        record for record in item['nodes'] if record['id']
+                    ],
+                    "lines": [
+                        record
+                        for record in item['lines']
+                        if record['from'] and record['to']
+                    ],
                     "rootId": item['rootId']
                 }
             return res
@@ -452,10 +653,10 @@ def label_kinship_graph(nodeid):
 def label_impact_graph(nodeid):
     """
     生成数据标签的影响关系图谱
-    
+
     Args:
         nodeid: 节点ID
-        
+
     Returns:
         图谱数据
     """
@@ -463,8 +664,12 @@ def label_impact_graph(nodeid):
     cql = """
         MATCH(n:DataLabel)
         WHERE id(n)=$nodeId
-        RETURN {id:toString(id(n)),text:(n.name_zh),type:"label"} AS nodes,
-               toString(id(n)) as rootId
+        RETURN {
+            id:toString(id(n)),
+            text:(n.name_zh),
+            type:"label"
+        } AS nodes,
+        toString(id(n)) as rootId
         """
     driver = None
     try:
@@ -491,17 +696,19 @@ def label_impact_graph(nodeid):
 def dynamic_label_list(name_filter=None):
     """
     根据内容查询相似的数据标签分组
-    
+
     Args:
         name_filter: 内容过滤条件
-        
+
     Returns:
         标签分组列表
     """
     # 构建完整的查询语句
-    cql = f"""
+    cql = """
     MATCH (n:DataLabel)
-    WITH n, apoc.text.levenshteinSimilarity(n.group, "{name_filter}") AS similarity
+    WITH
+        n,
+        apoc.text.levenshteinSimilarity(n.group, $name_filter) AS similarity
     WHERE similarity > 0.1 // 设置相似度阈值
     RETURN DISTINCT n.group as name_zh, id(n) as nodeid
     """
@@ -510,14 +717,16 @@ def dynamic_label_list(name_filter=None):
     try:
         driver = connect_graph()
         with driver.session() as session:
-            result = session.run(cql)
+            result = session.run(cql, name_filter=name_filter or "")
             data = []
             for record in result:
-                data.append({
-                    "name_zh": record['name_zh'],
-                    "id": record['nodeid']
-                })
-            
+                data.append(
+                    {
+                        "name_zh": record['name_zh'],
+                        "id": record['nodeid'],
+                    }
+                )
+
             return data
     except (ConnectionError, ValueError) as e:
         logger.error(f"Neo4j数据库连接失败: {str(e)}")
@@ -526,65 +735,223 @@ def dynamic_label_list(name_filter=None):
         if driver:
             driver.close()
 
+
 def search_info(key, value):
     """
     搜索指定属性的节点信息
-    
+
     Args:
         key: 搜索属性键
         value: 搜索属性值
-        
+
     Returns:
         搜索结果列表
     """
+    field_pattern = r"^[A-Za-z_][A-Za-z0-9_]*$"
+    if not re.match(field_pattern, str(key)):
+        logger.warning("非法属性键: %s", key)
+        return []
+
     query = """
     MATCH (n)
-    WHERE n.{} =~ '(?i).*{}.*'
-    WITH n, properties(n) as properties, n.create_time as create_time, id(n) as nodeid
+    WHERE n[$field] =~ $pattern
+    WITH
+        n,
+        properties(n) as properties,
+        n.create_time as create_time,
+        id(n) as nodeid
     RETURN properties, nodeid, create_time, labels(n) as labels
     LIMIT 30
-    """.format(key, value)
-    
-    result = connect_graph.run(query)
-    
-    results = []
-    for record in result:
-        results.append({
-            "properties": record["properties"],
-            "id": record["nodeid"],
-            "create_time": record["create_time"],
-            "labels": record["labels"]
-        })
-    
-    return results
+    """
+
+    driver = None
+    try:
+        driver = connect_graph()
+    except (ConnectionError, ValueError) as e:
+        logger.error(f"无法连接到Neo4j数据库: {str(e)}")
+        return []
+
+    try:
+        with driver.session() as session:
+            result = session.run(
+                query,
+                field=key,
+                pattern=f"(?i).*{value}.*",
+            )
+
+            results = []
+            for record in result:
+                results.append(
+                    {
+                        "properties": record["properties"],
+                        "id": record["nodeid"],
+                        "create_time": record["create_time"],
+                        "labels": record["labels"],
+                    }
+                )
+
+            return results
+    except Exception as e:
+        logger.error(f"搜索节点信息失败: {str(e)}")
+        return []
+    finally:
+        if driver:
+            driver.close()
+
 
 def label_info(id):
     """
     获取标签节点的信息
-    
+
     Args:
         id: 节点ID
-        
+
     Returns:
         标签节点信息
     """
     query = """
     MATCH (n)
     WHERE id(n) = $nodeId
-    RETURN {id:toString(id(n)),text:(n.name_zh),type:"label"} AS nodes,
-            toString(id(n)) as rootId
+    RETURN {
+        id:toString(id(n)),
+        text:(n.name_zh),
+        type:"label"
+    } AS nodes,
+    toString(id(n)) as rootId
+    """
+
+    driver = None
+    try:
+        driver = connect_graph()
+        with driver.session() as session:
+            result = session.run(query, nodeId=id).data()
+            return result[0] if result else {}
+    except (ConnectionError, ValueError) as e:
+        logger.error(f"无法连接到Neo4j数据库: {str(e)}")
+        return {}
+    finally:
+        if driver:
+            driver.close()
+
+
+def graph_all(domain_id, include_meta=True):
+    """
+    获取完整关系图谱
+
+    Args:
+        domain_id: 节点ID
+        include_meta: 是否包含元数据节点
+
+    Returns:
+        dict: 包含 nodes 与 lines 的图谱数据
     """
-    res = connect_graph.run(query, nodeId=id).data()
-    return res[0] if res else {}
+    try:
+        domain_id_int = int(domain_id)
+    except (ValueError, TypeError):
+        logger.error(f"节点ID不是有效的整数: {domain_id}")
+        return {"nodes": [], "lines": []}
+
+    try:
+        with neo4j_driver.get_session() as session:
+            nodes = {}
+            lines = {}
+
+            # 使用路径查询同时获取节点和关系
+            if include_meta:
+                cypher = """
+                MATCH (n)
+                WHERE id(n) = $domain_id
+                OPTIONAL MATCH path = (n)-[r]-(m)
+                RETURN n,
+                       collect(DISTINCT m) as related_nodes,
+                       collect(DISTINCT r) as relationships
+                """
+            else:
+                cypher = """
+                MATCH (n)
+                WHERE id(n) = $domain_id
+                OPTIONAL MATCH path = (n)-[r]-(m)
+                WHERE NOT (m:DataMeta)
+                RETURN n,
+                       collect(DISTINCT m) as related_nodes,
+                       collect(DISTINCT r) as relationships
+                """
+
+            result = session.run(cypher, domain_id=domain_id_int)
+            record = result.single()
+
+            if not record:
+                logger.warning(f"未找到节点: {domain_id_int}")
+                return {"nodes": [], "lines": []}
+
+            # 处理起始节点
+            n_node = record["n"]
+            if n_node:
+                n_props = dict(n_node)
+                n_labels = list(n_node.labels)
+                n_props["id"] = domain_id_int
+                n_props["node_type"] = n_labels[0] if n_labels else ""
+                nodes[domain_id_int] = n_props
+
+            # 处理关联节点
+            related_nodes = record["related_nodes"] or []
+            for m_node in related_nodes:
+                if m_node is None:
+                    continue
+                m_elem_id = m_node.element_id
+                m_id = int(m_elem_id.split(":")[-1])
+                if m_id not in nodes:
+                    m_props = dict(m_node)
+                    m_labels = list(m_node.labels)
+                    m_props["id"] = m_id
+                    m_props["node_type"] = m_labels[0] if m_labels else ""
+                    nodes[m_id] = m_props
+
+            # 处理关系
+            relationships = record["relationships"] or []
+            for rel in relationships:
+                if rel is None:
+                    continue
+                rel_elem_id = rel.element_id
+                rel_id = rel_elem_id.split(":")[-1]
+                if rel_id not in lines:
+                    # 获取关系的起始和结束节点 ID
+                    start_elem_id = rel.start_node.element_id
+                    end_elem_id = rel.end_node.element_id
+                    start_id = start_elem_id.split(":")[-1]
+                    end_id = end_elem_id.split(":")[-1]
+                    # 获取关系类型
+                    rel_type = type(rel).__name__
+                    lines[rel_id] = {
+                        "id": rel_id,
+                        "from": start_id,
+                        "to": end_id,
+                        "text": rel_type,
+                    }
+
+            logger.info(
+                f"graph_all 结果: node_id={domain_id_int}, "
+                f"nodes={len(nodes)}, lines={len(lines)}"
+            )
+
+            return {
+                "nodes": list(nodes.values()),
+                "lines": list(lines.values()),
+            }
+    except Exception as e:
+        logger.error(f"获取图谱失败: {str(e)}")
+        import traceback
+        logger.error(traceback.format_exc())
+        return {"nodes": [], "lines": []}
 
 
 def node_delete(node_id):
     """
     删除 DataLabel 节点及其所有关联关系
-    
+
     Args:
         node_id: 节点ID(整数)
-        
+
     Returns:
         dict: 删除结果,包含 success 状态和 message 信息
     """
@@ -594,7 +961,7 @@ def node_delete(node_id):
     except (ConnectionError, ValueError) as e:
         logger.error(f"无法连接到Neo4j数据库: {str(e)}")
         return {"success": False, "message": "无法连接到数据库"}
-    
+
     try:
         with driver.session() as session:
             # 首先检查节点是否存在且为 DataLabel 类型
@@ -603,12 +970,18 @@ def node_delete(node_id):
             WHERE id(n) = $nodeId
             RETURN n
             """
-            check_result = session.run(check_query, nodeId=node_id).single()
-            
+            check_result = session.run(
+                check_query,
+                nodeId=node_id,
+            ).single()
+
             if not check_result:
                 logger.warning(f"DataLabel 节点不存在: ID={node_id}")
-                return {"success": False, "message": f"DataLabel 节点不存在 (ID: {node_id})"}
-            
+                return {
+                    "success": False,
+                    "message": f"DataLabel 节点不存在 (ID: {node_id})",
+                }
+
             # 删除节点及其所有关系
             delete_query = """
             MATCH (n:DataLabel)
@@ -616,12 +989,26 @@ def node_delete(node_id):
             DETACH DELETE n
             RETURN count(n) as deleted_count
             """
-            delete_result = session.run(delete_query, nodeId=node_id).single()
+            delete_result = session.run(
+                delete_query,
+                nodeId=node_id,
+            ).single()
+            if not delete_result:
+                logger.warning(f"删除结果为空: ID={node_id}")
+                return {
+                    "success": False,
+                    "message": "删除失败,未获取到删除结果",
+                }
             deleted_count = delete_result["deleted_count"]
-            
+
             if deleted_count > 0:
                 logger.info(f"成功删除 DataLabel 节点: ID={node_id}")
-                return {"success": True, "message": f"成功删除 DataLabel 节点 (ID: {node_id})"}
+                return {
+                    "success": True,
+                    "message": (
+                        f"成功删除 DataLabel 节点 (ID: {node_id})"
+                    ),
+                }
             else:
                 logger.warning(f"删除失败,节点可能已被删除: ID={node_id}")
                 return {"success": False, "message": "删除失败,节点可能已被删除"}
@@ -630,4 +1017,4 @@ def node_delete(node_id):
         return {"success": False, "message": f"删除失败: {str(e)}"}
     finally:
         if driver:
-            driver.close()
+            driver.close()

+ 28 - 18
app/core/data_metric/metric_interface.py

@@ -40,43 +40,52 @@ def metric_list(skip_count, page_size, name_en_filter=None,
     data = []
 
     # 构建查询条件
-    where_clause = []
     params = {}
-    
-    # 基础节点条件
+
+    # 节点本身的过滤条件(放在 MATCH 之后、OPTIONAL MATCH 之前)
+    node_conditions = []
     if name_zh_filter:
-        where_clause.append("n.name_zh CONTAINS $name_zh_filter")
+        node_conditions.append("n.name_zh CONTAINS $name_zh_filter")
         params['name_zh_filter'] = name_zh_filter
     if name_en_filter:
-        where_clause.append("n.name_en CONTAINS $name_en_filter")
+        node_conditions.append("n.name_en CONTAINS $name_en_filter")
         params['name_en_filter'] = name_en_filter
     if category_filter:
-        where_clause.append("n.category CONTAINS $category_filter")
+        node_conditions.append("n.category CONTAINS $category_filter")
         params['category_filter'] = category_filter
     if create_time_filter:
-        where_clause.append("n.create_time CONTAINS $create_time_filter")
+        node_conditions.append("n.create_time CONTAINS $create_time_filter")
         params['create_time_filter'] = create_time_filter
-    
-    # 标签过滤条件
+
+    node_where = (
+        "WHERE " + " AND ".join(node_conditions)
+        if node_conditions else ""
+    )
+
+    # 标签过滤条件(放在 OPTIONAL MATCH 之后)
+    tag_conditions = []
     if tag_filter:
-        where_clause.append("id(la) = $tag_filter")
+        tag_conditions.append("id(la) = $tag_filter")
         params['tag_filter'] = tag_filter
 
-    # 构建WHERE子句
-    where_str = " AND ".join(where_clause) if where_clause else "TRUE"
+    tag_where = (
+        "WHERE " + " AND ".join(tag_conditions)
+        if tag_conditions else ""
+    )
 
-    # 构建查询语句 - 移除DataModel相关查询
+    # 构建查询语句
     cql = f"""
     MATCH (n:DataMetric)
+    {node_where}
     OPTIONAL MATCH (n)-[:LABEL]->(la:DataLabel)
-    WHERE {where_str}
+    {tag_where}
     WITH n, la,
          properties(n) AS properties,
          n.create_time AS create_time,
          id(n) AS nodeid,
-         CASE WHEN la IS NOT NULL 
-              THEN {{id: id(la), name_zh: la.name_zh}} 
-              ELSE null 
+         CASE WHEN la IS NOT NULL
+              THEN {{id: id(la), name_zh: la.name_zh}}
+              ELSE null
          END AS tag
     RETURN properties, create_time, nodeid, tag
     ORDER BY create_time DESC
@@ -114,8 +123,9 @@ def metric_list(skip_count, page_size, name_en_filter=None,
             # 获取总数 - 使用相同的过滤条件
             total_query = f"""
             MATCH (n:DataMetric)
+            {node_where}
             OPTIONAL MATCH (n)-[:LABEL]->(la:DataLabel)
-            WHERE {where_str}
+            {tag_where}
             RETURN COUNT(DISTINCT n) AS total
             """
             total_result = session.run(total_query, **params).single()["total"]

+ 317 - 170
app/core/meta_data/meta_data.py

@@ -8,45 +8,46 @@ import logging
 from app.services.neo4j_driver import neo4j_driver
 import ast
 import re
-import pandas as pd
 from minio import S3Error
-from py2neo import Relationship
+from typing import Any
 import json
-import io
-import random
-import string
-import numpy as np
 from openai import OpenAI
 from flask import current_app
 from app.core.llm.llm_service import llm_client as llm_call  # 导入core/llm模块的函数
 
 logger = logging.getLogger("app")
 
+
 def serialize_neo4j_object(obj):
     """
     将Neo4j对象转换为可JSON序列化的格式
-    
+
     Args:
         obj: Neo4j节点或属性值
-        
+
     Returns:
         序列化后的对象
     """
     if hasattr(obj, 'year'):  # DateTime对象
         # 将Neo4j DateTime转换为字符串
-        return obj.strftime("%Y-%m-%d %H:%M:%S") if hasattr(obj, 'strftime') else str(obj)
+        return (
+            obj.strftime("%Y-%m-%d %H:%M:%S")
+            if hasattr(obj, 'strftime')
+            else str(obj)
+        )
     elif hasattr(obj, '__dict__'):  # 复杂对象
         return str(obj)
     else:
         return obj
 
+
 def serialize_node_properties(node):
     """
     将Neo4j节点属性序列化为可JSON化的字典
-    
+
     Args:
         node: Neo4j节点对象
-        
+
     Returns:
         dict: 序列化后的属性字典
     """
@@ -55,72 +56,97 @@ def serialize_node_properties(node):
         properties[key] = serialize_neo4j_object(value)
     return properties
 
+
 def get_formatted_time():
     """获取格式化的当前时间"""
     return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
 
+
 def translate_and_parse(content):
     """
     翻译内容并返回结果
-    
+
     Args:
         content: 需要翻译的内容
-        
+
     Returns:
         list: 包含翻译结果的列表
     """
     # 调用LLM服务进行翻译
     translated_text = llm_call(content)
-    
+
     # 如果翻译失败,返回原文
     if translated_text is None:
         return [content]
-    
+
     # 确保返回格式为列表
     return [translated_text]
 
+
 # 为保持原有功能,保留旧的llm_client函数
 def llm_client(content):
     """调用LLM服务进行中英文翻译,返回结果"""
     # 直接调用基础服务层的翻译函数
     return llm_call(content)
 
+
 def infer_column_type(df):
     try:
         # 列名
         res = df.columns.to_list()
         columns = ','.join(res)
-        
+
         # 使用配置中的LLM参数
         api_k = current_app.config.get('LLM_API_KEY')
         base_u = current_app.config.get('LLM_BASE_URL')
-        model = current_app.config.get('LLM_MODEL_NAME')
-        
+        model = current_app.config.get('LLM_MODEL_NAME') or "gpt-4o-mini"
+
         client = OpenAI(api_key=api_k, base_url=base_u)
-        response = client.chat.completions.create(
+        response = client.chat.completions.create(  # type: ignore[arg-type]
             model=model,
             messages=[
-                {"role": "system", "content": "你是一个PostgreSQL数据库专家,精通PostgreSQL所有数据类型和最佳实践"},
-                {"role": "user",
-                 "content": "请根据以下数据表内容:" + str(df.head(n=6)) + "其列名为" + columns +
-                            ",帮我判断每个列最合适的PostgreSQL数据类型。请注意以下要求:" +
-                            "1. 对于文本数据,使用varchar并给出合适长度,如varchar(50)、varchar(255)等" +
-                            "2. 对于整数,根据数值范围选择smallint、integer或bigint" +
-                            "3. 对于小数,如果是金额相关字段使用numeric(15,2),其他小数使用numeric(18,6)" +
-                            "4. 对于日期时间,根据实际情况选择date、time或timestamp" +
-                            "5. 对于布尔值,使用boolean类型" +
-                            "6. 如果是JSON数据,使用jsonb类型" +
-                            "请以列表格式返回,列表中的元素顺序要与输入的列名顺序一致,如:" +
-                            "['varchar(255)', 'integer', 'numeric(15,2)', 'timestamp']" +
-                            "只返回列表,不要有任何其他说明文字"}
+                {
+                    "role": "system",
+                    "content": (
+                        "你是一个PostgreSQL数据库专家,精通PostgreSQL所有数据类型和最佳实践"
+                    ),
+                },
+                {
+                    "role": "user",
+                    "content": (
+                        "请根据以下数据表内容:" + str(df.head(n=6))
+                        + "其列名为" + columns
+                        + ",帮我判断每个列最合适的PostgreSQL数据类型。请注意以下要求:"
+                        + (
+                            "1. 对于文本数据,使用varchar并给出合适长度,如varchar(50)、"
+                            "varchar(255)等"
+                        )
+                        + "2. 对于整数,根据数值范围选择smallint、integer或bigint"
+                        + (
+                            "3. 对于小数,如果是金额相关字段使用numeric(15,2),"
+                            "其他小数使用numeric(18,6)"
+                        )
+                        + "4. 对于日期时间,根据实际情况选择date、time或timestamp"
+                        + "5. 对于布尔值,使用boolean类型"
+                        + "6. 如果是JSON数据,使用jsonb类型"
+                        + (
+                            "请以列表格式返回,列表的顺序要与输入列名一致,如:"
+                            "['varchar(255)', 'integer', "
+                            "'numeric(15,2)', 'timestamp']"
+                            "只返回列表,不要有任何其他说明文字"
+                        )
+                    ),
+                },
             ],
             max_tokens=1024,
             temperature=0.1,
-            stream=False
+            stream=False,
         )
-        result = response.choices[0].message.content
-        res = result.strip('`').strip('python').strip('`').strip()
-        
+        content = response.choices[0].message.content
+        if not content:
+            raise ValueError("LLM 返回内容为空")
+        res = str(content).strip('`').replace('python', '').strip('`').strip()
+
         # 使用 ast.literal_eval 函数将字符串转换为列表
         result_list = ast.literal_eval(res)
         return result_list
@@ -129,11 +155,20 @@ def infer_column_type(df):
         # 返回一个空列表或默认类型列表,保持返回类型一致
         return ['varchar(255)'] * len(df.columns) if not df.empty else []
 
-def meta_list(page, page_size, search="", name_en_filter=None, 
-             name_zh_filter=None, category_filter=None, create_time_filter=None, tag_filter=None):
+
+def meta_list(
+    page,
+    page_size,
+    search="",
+    name_en_filter=None,
+    name_zh_filter=None,
+    category_filter=None,
+    create_time_filter=None,
+    tag_filter=None,
+):
     """
     获取元数据列表
-    
+
     Args:
         page: 当前页码
         page_size: 每页数量
@@ -143,7 +178,7 @@ def meta_list(page, page_size, search="", name_en_filter=None,
         category_filter: 分类过滤
         create_time_filter: 时间过滤
         tag_filter: 标签过滤
-    
+
     Returns:
         tuple: (result_list, total_count)
     """
@@ -151,55 +186,126 @@ def meta_list(page, page_size, search="", name_en_filter=None,
         with neo4j_driver.get_session() as session:
             # 构建查询条件
             match_clause = "MATCH (n:DataMeta)"
+            optional_match = "OPTIONAL MATCH (n)-[:LABEL]->(t:DataLabel)"
             where_conditions = []
-            
+            params: dict = {}
+
             if search:
-                where_conditions.append(f"n.name_zh CONTAINS '{search}'")
-            
+                where_conditions.append("n.name_zh CONTAINS $search")
+                params["search"] = search
+
             if name_en_filter:
-                where_conditions.append(f"n.name_en CONTAINS '{name_en_filter}'")
-                
+                where_conditions.append("n.name_en CONTAINS $name_en_filter")
+                params["name_en_filter"] = name_en_filter
+
             if name_zh_filter:
-                where_conditions.append(f"n.name_zh CONTAINS '{name_zh_filter}'")
-                
+                where_conditions.append("n.name_zh CONTAINS $name_zh_filter")
+                params["name_zh_filter"] = name_zh_filter
+
             if category_filter:
-                where_conditions.append(f"n.category = '{category_filter}'")
-                
+                where_conditions.append("n.category = $category_filter")
+                params["category_filter"] = category_filter
+
             if create_time_filter:
-                where_conditions.append(f"n.create_time CONTAINS '{create_time_filter}'")
-                
-            if tag_filter:
-                where_conditions.append(f"n.tag = '{tag_filter}'")
-            
-            where_clause = " WHERE " + " AND ".join(where_conditions) if where_conditions else ""
-            
+                where_conditions.append(
+                    "n.create_time CONTAINS $create_time_filter"
+                )
+                params["create_time_filter"] = create_time_filter
+
+            # 构建主节点的 WHERE 子句
+            where_clause = (
+                " WHERE " + " AND ".join(where_conditions)
+                if where_conditions else ""
+            )
+
+            # 处理 tag_filter - 支持 ID 列表或对象列表
+            tag_where_clause = ""
+            if tag_filter and isinstance(tag_filter, list):
+                tag_ids = []
+                for item in tag_filter:
+                    if isinstance(item, dict) and 'id' in item:
+                        tag_ids.append(int(item['id']))
+                    elif isinstance(item, (int, str)):
+                        try:
+                            tag_ids.append(int(item))
+                        except (ValueError, TypeError):
+                            pass
+                if tag_ids:
+                    tag_where_clause = " WHERE id(t) IN $tag_ids"
+                    params["tag_ids"] = tag_ids
+
             # 计算总数
-            count_cypher = f"{match_clause}{where_clause} RETURN count(n) as count"
-            count_result = session.run(count_cypher)
-            total_count = count_result.single()["count"]
-            
+            if tag_where_clause:
+                # 有 tag 过滤时,使用 MATCH 而非 OPTIONAL MATCH
+                count_cypher = f"""
+                {match_clause}
+                {where_clause}
+                MATCH (n)-[:LABEL]->(t:DataLabel)
+                {tag_where_clause}
+                RETURN count(DISTINCT n) as count
+                """
+            else:
+                count_cypher = f"""
+                {match_clause}
+                {where_clause}
+                RETURN count(DISTINCT n) as count
+                """
+            count_result = session.run(count_cypher, **params)
+            count_record = count_result.single()
+            total_count = count_record["count"] if count_record else 0
+
             # 分页查询
             skip = (page - 1) * page_size
-            cypher = f"""
-            {match_clause}{where_clause}
-            RETURN n
-            ORDER BY n.name_zh
-            SKIP {skip} LIMIT {page_size}
-            """
-            result = session.run(cypher)
-            
+            params["skip"] = skip
+            params["limit"] = page_size
+            if tag_where_clause:
+                # 有 tag 过滤时,先匹配符合条件的节点
+                cypher = f"""
+                {match_clause}
+                {where_clause}
+                MATCH (n)-[:LABEL]->(t:DataLabel)
+                {tag_where_clause}
+                WITH DISTINCT n
+                {optional_match}
+                RETURN n, collect(DISTINCT t) as tags
+                ORDER BY n.name_zh
+                SKIP $skip LIMIT $limit
+                """
+            else:
+                cypher = f"""
+                {match_clause}
+                {where_clause}
+                {optional_match}
+                RETURN n, collect(DISTINCT t) as tags
+                ORDER BY n.name_zh
+                SKIP $skip LIMIT $limit
+                """
+            result = session.run(cypher, **params)
+
             # 格式化结果
             result_list = []
             for record in result:
                 node = serialize_node_properties(record["n"])
                 node["id"] = record["n"].id
+
+                tag_nodes = record.get("tags") or []
+                tag_list = []
+                for tag in tag_nodes:
+                    if tag:
+                        tag_list.append({
+                            "id": tag.id,
+                            "name_zh": tag.get("name_zh", ""),
+                            "name_en": tag.get("name_en", ""),
+                        })
+                node["tag"] = tag_list
                 result_list.append(node)
-                
+
             return result_list, total_count
     except Exception as e:
         logger.error(f"获取元数据列表失败: {str(e)}")
         raise
 
+
 def handle_id_unstructured(node_id):
     """处理非结构化数据节点"""
     try:
@@ -207,14 +313,14 @@ def handle_id_unstructured(node_id):
         if node_id is None:
             logger.error("node_id参数不能为None")
             return None
-        
+
         # 确保node_id为整数
         try:
             node_id_int = int(node_id)
         except (ValueError, TypeError):
             logger.error(f"node_id不是有效的整数: {node_id}")
             return None
-        
+
         with neo4j_driver.get_session() as session:
             query = "MATCH (n) WHERE id(n) = $node_id RETURN n"
             result = session.run(query, node_id=node_id_int)
@@ -227,12 +333,13 @@ def handle_id_unstructured(node_id):
         logger.error(f"处理非结构化数据节点失败: {str(e)}")
         raise
 
+
 def get_file_content(minio_client, bucket_name, object_name):
     """从MinIO获取文件内容"""
     try:
         # 获取对象
         response = minio_client.get_object(bucket_name, object_name)
-        
+
         # 读取内容
         file_content = response.read().decode('utf-8')
         return file_content
@@ -243,21 +350,23 @@ def get_file_content(minio_client, bucket_name, object_name):
         response.close()
         response.release_conn()
 
+
 def parse_text(text):
     """解析文本内容,提取关键信息"""
     # 提取作者信息
     author_match = re.search(r'作者[::]\s*(.+?)[\n\r]', text)
     author = author_match.group(1) if author_match else ""
-    
+
     # 提取关键词
     keyword_match = re.search(r'关键词[::]\s*(.+?)[\n\r]', text)
     keywords = keyword_match.group(1) if keyword_match else ""
-    
+
     return {
         "author": author.strip(),
         "keywords": keywords.strip()
     }
 
+
 def parse_keyword(content):
     """解析关键词"""
     if "," in content:
@@ -269,25 +378,26 @@ def parse_keyword(content):
     else:
         return [content]
 
+
 def text_resource_solve(receiver, name_zh, keyword):
     """处理文本资源解析"""
     try:
         # 构建提示词 - 使用简短明确的指令
         prompt = f"{name_zh}"
-        
+
         # 调用LLM获取英文翻译
         name_en = llm_client(prompt)
-        
+
         # 提取关键词
         keywords = parse_keyword(keyword)
-        
+
         # 为每个关键词获取英文翻译
         keywords_en = []
         for kw in keywords:
             # 直接使用关键词作为翻译输入
             kw_en = llm_client(kw)
             keywords_en.append(kw_en)
-            
+
         # 构建返回数据
         return {
             "name_zh": name_zh,
@@ -299,13 +409,14 @@ def text_resource_solve(receiver, name_zh, keyword):
         logger.error(f"文本资源处理失败: {str(e)}")
         raise
 
+
 def meta_kinship_graph(node_id):
     """
     获取元数据亲缘关系图谱
-    
+
     Args:
         node_id: 元数据节点ID
-    
+
     Returns:
         dict: 图谱数据
     """
@@ -314,66 +425,79 @@ def meta_kinship_graph(node_id):
         if node_id is None:
             logger.error("node_id参数不能为None")
             return {"nodes": [], "relationships": []}
-        
+
         # 确保node_id为整数
         try:
             node_id_int = int(node_id)
         except (ValueError, TypeError):
             logger.error(f"node_id不是有效的整数: {node_id}")
             return {"nodes": [], "relationships": []}
-        
+
         with neo4j_driver.get_session() as session:
-            # 获取节点和直接关系
+            # 获取节点及其直接关系(可为空)
             cypher = """
-            MATCH (n)-[r]-(m)
+            MATCH (n)
             WHERE id(n) = $node_id
+            OPTIONAL MATCH (n)-[r]-(m)
             RETURN n, r, m
             """
             result = session.run(cypher, node_id=node_id_int)
-            
-            # 格式化结果为图谱数据
-            nodes = {}
-            relationships = []
-            
+
+            nodes: dict[int, dict] = {}
+            relationships: list[dict] = []
+
             for record in result:
-                # 处理源节点
-                source_node = serialize_node_properties(record["n"])
-                source_node["id"] = record["n"].id
+                n_node = record["n"]
+                if n_node:
+                    source_node = serialize_node_properties(n_node)
+                    source_node["id"] = n_node.id
                 nodes[source_node["id"]] = source_node
-                
-                # 处理目标节点
-                target_node = serialize_node_properties(record["m"])
-                target_node["id"] = record["m"].id
+
+                rel = record.get("r")
+                m_node = record.get("m")
+
+                if m_node:
+                    target_node = serialize_node_properties(m_node)
+                    target_node["id"] = m_node.id
                 nodes[target_node["id"]] = target_node
-                
-                # 处理关系
-                rel = record["r"]
-                relationship = {
-                    "id": rel.id,
-                    "source": record["n"].id,
-                    "target": record["m"].id,
-                    "type": rel.type
-                }
-                relationships.append(relationship)
-            
-            # 转换为列表
-            nodes_list = list(nodes.values())
-            
+
+                if rel and n_node and m_node:
+                    relationships.append({
+                        "id": rel.id,
+                        "source": n_node.id,
+                        "target": m_node.id,
+                        "type": rel.type,
+                    })
+
+            # 若无关系结果但节点存在,确保节点仍被返回
+            if not nodes:
+                node_only = session.run(
+                    "MATCH (n) WHERE id(n) = $node_id RETURN n",
+                    node_id=node_id_int,
+                ).single()
+                if node_only and node_only["n"]:
+                    n_node = node_only["n"]
+                    nodes[n_node.id] = {
+                        **serialize_node_properties(n_node),
+                        "id": n_node.id,
+                    }
+
             return {
-                "nodes": nodes_list,
-                "relationships": relationships
+                "nodes": list(nodes.values()),
+                "relationships": relationships,
             }
     except Exception as e:
         logger.error(f"获取元数据亲缘关系图谱失败: {str(e)}")
         raise
 
+
 def meta_impact_graph(node_id):
     """
     获取元数据影响关系图谱
-    
+
     Args:
         node_id: 元数据节点ID
-    
+
     Returns:
         dict: 图谱数据
     """
@@ -382,14 +506,14 @@ def meta_impact_graph(node_id):
         if node_id is None:
             logger.error("node_id参数不能为None")
             return {"nodes": [], "relationships": []}
-        
+
         # 确保node_id为整数
         try:
             node_id_int = int(node_id)
         except (ValueError, TypeError):
             logger.error(f"node_id不是有效的整数: {node_id}")
             return {"nodes": [], "relationships": []}
-        
+
         with neo4j_driver.get_session() as session:
             # 获取所有可达节点和关系
             cypher = """
@@ -398,25 +522,26 @@ def meta_impact_graph(node_id):
             RETURN path
             """
             result = session.run(cypher, node_id=node_id_int)
-            
+
             # 格式化结果
             nodes = {}
             relationships = set()
-            
+
             for record in result:
                 path = record["path"]
-                
+
                 # 处理路径中的所有节点
                 for node in path.nodes:
                     node_dict = serialize_node_properties(node)
                     node_dict["id"] = node.id
                     nodes[node.id] = node_dict
-                
+
                 # 处理路径中的所有关系
                 for rel in path.relationships:
-                    relationship = (rel.id, rel.start_node.id, rel.end_node.id, rel.type)
+                    relationship = (rel.id, rel.start_node.id,
+                                    rel.end_node.id, rel.type)
                     relationships.add(relationship)
-            
+
             # 转换为列表
             nodes_list = list(nodes.values())
             relationships_list = [
@@ -428,7 +553,7 @@ def meta_impact_graph(node_id):
                 }
                 for rel in relationships
             ]
-            
+
             return {
                 "nodes": nodes_list,
                 "relationships": relationships_list
@@ -437,6 +562,7 @@ def meta_impact_graph(node_id):
         logger.error(f"获取元数据影响关系图谱失败: {str(e)}")
         raise
 
+
 def parse_entity_relation(text):
     """从文本中解析实体关系"""
     try:
@@ -446,14 +572,14 @@ def parse_entity_relation(text):
         [
           {{"entity1": "实体1", "relation": "关系", "entity2": "实体2"}}
         ]
-        
+
         文本内容:
         {text}
         """
-        
+
         # 调用LLM获取关系提取结果
         result = llm_client(prompt)
-        
+
         # 解析JSON结果
         try:
             relations = json.loads(result)
@@ -461,11 +587,12 @@ def parse_entity_relation(text):
         except json.JSONDecodeError:
             logger.error(f"关系提取结果JSON解析失败: {result}")
             return []
-            
+
     except Exception as e:
         logger.error(f"实体关系提取失败: {str(e)}")
         return []
-        
+
+
 def handle_txt_graph(node_id, entity, entity_en):
     """处理文本图谱创建"""
     try:
@@ -473,31 +600,31 @@ def handle_txt_graph(node_id, entity, entity_en):
         if node_id is None:
             logger.error("node_id参数不能为None")
             return False
-        
+
         # 确保node_id为整数
         try:
             node_id_int = int(node_id)
         except (ValueError, TypeError):
             logger.error(f"node_id不是有效的整数: {node_id}")
             return False
-        
+
         # 创建实体节点
         with neo4j_driver.get_session() as session:
             # 查找源节点
             query = "MATCH (n) WHERE id(n) = $node_id RETURN n"
             result = session.run(query, node_id=node_id_int)
-            source_node = result.single()["n"]
-            
+            source_record = result.single()
+            source_node = source_record["n"] if source_record else None
             if not source_node:
                 return False
-                
+
             # 创建实体节点
             cypher = """
             MERGE (e:Entity {name_zh: $name_zh, name_en: $name_en})
             ON CREATE SET e.create_time = $create_time
             RETURN e
             """
-            
+
             create_time = get_formatted_time()
             result = session.run(
                 cypher,
@@ -505,44 +632,47 @@ def handle_txt_graph(node_id, entity, entity_en):
                 name_en=entity_en,
                 create_time=create_time
             )
-            
-            entity_node = result.single()["e"]
-            
+
+            entity_record = result.single()
+            entity_node = entity_record["e"] if entity_record else None
+
             # 创建关系
             if source_node and entity_node:
                 # 检查关系是否已存在
                 rel_check = """
-                MATCH (s)-[r:CONTAINS]->(e) 
+                MATCH (s)-[r:CONTAINS]->(e)
                 WHERE id(s) = $source_id AND id(e) = $entity_id
                 RETURN r
                 """
-                
+
                 rel_result = session.run(
                     rel_check,
                     source_id=source_node.id,
                     entity_id=entity_node.id
                 )
-                
+
                 # 如果关系不存在,则创建
                 if not rel_result.single():
                     rel_create = """
-                    MATCH (s), (e) 
+                    MATCH (s), (e)
                     WHERE id(s) = $source_id AND id(e) = $entity_id
                     CREATE (s)-[r:CONTAINS]->(e)
                     RETURN r
                     """
-                    
+
                     session.run(
                         rel_create,
                         source_id=source_node.id,
                         entity_id=entity_node.id
                     )
-                    
+
             return True
     except Exception as e:
         logger.error(f"文本图谱处理失败: {str(e)}")
         return False
 
+
 def solve_unstructured_data(node_id, minio_client, prefix):
     """处理非结构化数据并提取实体关系"""
     try:
@@ -551,19 +681,24 @@ def solve_unstructured_data(node_id, minio_client, prefix):
         if not node_data:
             logger.error(f"节点不存在: {node_id}")
             return False
-            
+
         # 获取对象路径
         object_name = node_data.get('url')
         if not object_name:
             logger.error(f"文档路径不存在: {node_id}")
             return False
-            
+
         # 获取文件内容
-        file_content = get_file_content(minio_client, bucket_name=node_data.get('bucket_name', 'dataops'), object_name=object_name)
-        
+        file_content = get_file_content(
+            minio_client,
+            bucket_name=node_data.get('bucket_name', 'dataops'),
+            object_name=object_name,
+        )
+
         # 解析文本内容中的实体关系
-        relations = parse_entity_relation(file_content[:5000])  # 只处理前5000字符,避免过大内容
-        
+        relations = parse_entity_relation(
+            file_content[:5000])  # 只处理前5000字符,避免过大内容
+
         # 如果成功提取了关系
         if relations:
             # 更新节点信息
@@ -573,69 +708,81 @@ def solve_unstructured_data(node_id, minio_client, prefix):
                 SET n.processed = true, n.processTime = $process_time
                 RETURN n
                 """
-                
+
                 process_time = get_formatted_time()
                 session.run(
                     update_cypher,
                     node_id=int(node_id),
                     process_time=process_time
                 )
-                
+
                 # 为每个提取的关系创建实体和关系
                 for relation in relations:
                     entity1 = relation.get("entity1", "")
                     relation_type = relation.get("relation", "")
                     entity2 = relation.get("entity2", "")
-                    
+
                     if entity1 and entity2 and relation_type:
                         # 翻译实体名称为英文 - 使用简短直接的输入
                         entity1_en = llm_client(entity1)
                         entity2_en = llm_client(entity2)
-                        
+
                         # 创建第一个实体
                         entity1_cypher = """
                         MERGE (e:Entity {name_zh: $name_zh})
-                        ON CREATE SET e.name_en = $name_en, e.create_time = $create_time
+                        ON CREATE SET e.name_en = $name_en,
+                                      e.create_time = $create_time
                         RETURN e
                         """
-                        
+
                         entity1_result = session.run(
                             entity1_cypher,
                             name_zh=entity1,
                             name_en=entity1_en,
                             create_time=process_time
                         )
-                        entity1_node = entity1_result.single()["e"]
-                        
+                        entity1_record = entity1_result.single()
+                        entity1_node = (
+                            entity1_record["e"] if entity1_record else None
+                        )
+                        if not entity1_node:
+                            continue
+
                         # 创建第二个实体
                         entity2_cypher = """
                         MERGE (e:Entity {name_zh: $name_zh})
-                        ON CREATE SET e.name_en = $name_en, e.create_time = $create_time
+                        ON CREATE SET e.name_en = $name_en,
+                                      e.create_time = $create_time
                         RETURN e
                         """
-                        
+
                         entity2_result = session.run(
                             entity2_cypher,
                             name_zh=entity2,
                             name_en=entity2_en,
                             create_time=process_time
                         )
-                        entity2_node = entity2_result.single()["e"]
-                        
+                        entity2_record = entity2_result.single()
+                        entity2_node = (
+                            entity2_record["e"] if entity2_record else None
+                        )
+                        if not entity2_node:
+                            continue
+
                         # 创建它们之间的关系
-                        rel_cypher = """
+                        rel_cypher: Any = """
                         MATCH (e1:Entity), (e2:Entity)
                         WHERE id(e1) = $entity1_id AND id(e2) = $entity2_id
                         MERGE (e1)-[r:`{relation_type}`]->(e2)
                         RETURN r
                         """.replace("{relation_type}", relation_type)
-                        
+
                         session.run(
                             rel_cypher,
                             entity1_id=entity1_node.id,
                             entity2_id=entity2_node.id
                         )
-                        
+
                         # 创建源节点与实体的关系
                         source_rel1_cypher = """
                         MATCH (s), (e:Entity)
@@ -643,31 +790,31 @@ def solve_unstructured_data(node_id, minio_client, prefix):
                         MERGE (s)-[r:CONTAINS]->(e)
                         RETURN r
                         """
-                        
+
                         session.run(
                             source_rel1_cypher,
                             source_id=int(node_id),
                             entity_id=entity1_node.id
                         )
-                        
+
                         source_rel2_cypher = """
                         MATCH (s), (e:Entity)
                         WHERE id(s) = $source_id AND id(e) = $entity_id
                         MERGE (s)-[r:CONTAINS]->(e)
                         RETURN r
                         """
-                        
+
                         session.run(
                             source_rel2_cypher,
                             source_id=int(node_id),
                             entity_id=entity2_node.id
                         )
-            
+
             return True
         else:
             logger.warning(f"未能从文本中提取到实体关系: {node_id}")
             return False
-            
+
     except Exception as e:
         logger.error(f"处理非结构化数据失败: {str(e)}")
-        return False 
+        return False

+ 107 - 0
docs/api_interface_labellist.md

@@ -0,0 +1,107 @@
+# /labellist 接口说明(DataLabel 列表查询)
+
+本文档描述数据接口模块提供的 DataLabel 列表查询接口,便于前端接入与调试。
+
+## 基本信息
+- **URL**:`/labellist`(最终完整路径取决于 `data_interface` 蓝图注册前缀,例如 `/api/data_interface/labellist`)
+- **方法**:`POST`
+- **内容类型**:`application/json`
+- **返回格式**:`application/json`
+
+## 请求参数(JSON Body)
+| 字段 | 类型 | 必填 | 说明 |
+| --- | --- | --- | --- |
+| current | int | 否 | 页码,默认 1 |
+| size | int | 否 | 每页条数,默认 10 |
+| name_en | str | 否 | 标签英文名模糊匹配 |
+| name_zh | str | 否 | 标签中文名模糊匹配 |
+| category_filter | dict/list/str | 否 | 分类过滤。支持:<br>- `dict`:键为属性名,值为匹配内容,如 `{ "category": "质量", "scope": "公共" }`<br>- `list`:元素为 `{ "field": "...", "value": "..." }` 或单键值对,如 `[{"field":"category","value":"质量"},{"group":"公共"}]`<br>- `str`:等同于按 `category` 字段模糊匹配 |
+| group | str | 否 | 分组名模糊匹配 |
+
+说明:
+- 所有字符串匹配均使用 Cypher `CONTAINS`(大小写敏感视 Neo4j 配置而定)。
+- `category_filter` 会按提供的多个条件叠加 `AND` 过滤。
+
+## 响应字段
+成功时(`code=200`,`message="success"`):
+```json
+{
+  "code": 200,
+  "message": "success",
+  "data": {
+    "records": [
+      {
+        "id": 123,
+        "name_zh": "示例标签",
+        "name_en": "sample_label",
+        "category": "质量",
+        "group": "公共",
+        "describe": null,
+        "scope": null,
+        "number": 4
+      }
+    ],
+    "total": 57,
+    "size": 10,
+    "current": 1
+  }
+}
+```
+
+失败时(例如参数错误或 Neo4j 异常):
+```json
+{
+  "code": 500,
+  "message": "错误描述",
+  "data": {}
+}
+```
+
+## 请求示例
+```http
+POST /labellist
+Content-Type: application/json
+
+{
+  "current": 1,
+  "size": 10,
+  "name_zh": "标签",
+  "category_filter": [
+    {"field": "category", "value": "质量"},
+    {"field": "scope", "value": "公共"}
+  ],
+  "group": "模型"
+}
+```
+
+## 返回示例
+```json
+{
+  "code": 200,
+  "message": "success",
+  "data": {
+    "records": [
+      {
+        "id": 321,
+        "name_zh": "标签-质量",
+        "name_en": "label_quality",
+        "category": "质量",
+        "group": "模型",
+        "describe": null,
+        "scope": null,
+        "number": 2
+      }
+    ],
+    "total": 5,
+    "size": 10,
+    "current": 1
+  }
+}
+```
+
+## 前端对接提示
+- 必须以 `POST + JSON` 调用;若使用 Fetch/axios,设置 `headers: { "Content-Type": "application/json" }`。
+- 分页字段 `current`、`size` 需为整数,未传时后端使用默认值。
+- `category_filter` 支持多条件 AND 过滤,请确保字段名为合法的 Neo4j 属性名(只含字母、数字、下划线,且非数字开头)。
+- 返回的 `number` 字段表示该标签入度+出度关系数量,可用于前端展示关联数。
+

+ 82 - 0
docs/api_meta_node_graph.md

@@ -0,0 +1,82 @@
+# /node/graph 接口前端操作指南(元数据图谱查询)
+
+## 基本信息
+- **URL**:`/node/graph`(实际完整路径取决于 meta_data 蓝图前缀,例如 `/api/meta_data/node/graph`)
+- **方法**:`POST`
+- **请求体**:`application/json`
+- **返回格式**:`application/json`
+
+## 功能概述
+提交一个 `nodeId`,返回:
+- `node`:该节点的属性信息(即便没有任何关联关系也会返回)。
+- `related_nodes`:与该节点存在关系的其他节点(包含属性)。
+- `relationships`:关系列表,含 `source`、`target`、`type`、`id`。
+
+## 请求参数
+| 字段 | 类型 | 必填 | 说明 |
+| --- | --- | --- | --- |
+| nodeId | int | 是 | Neo4j 节点 ID,必须为整数 |
+
+错误时返回:
+- `code=500`,`message` 为错误描述(如 `nodeId 必须为整数`)。
+
+## 响应结构
+成功(`code=200`,`message="success"`)示例:
+```json
+{
+  "code": 200,
+  "message": "success",
+  "data": {
+    "node": {
+      "id": 123,
+      "name_zh": "示例节点",
+      "name_en": "sample_node",
+      "...": "其他属性"
+    },
+    "related_nodes": [
+      {
+        "id": 456,
+        "name_zh": "关联节点A",
+        "...": "其他属性"
+      }
+    ],
+    "relationships": [
+      {
+        "id": 789,
+        "source": 123,
+        "target": 456,
+        "type": "REL_TYPE"
+      }
+    ]
+  }
+}
+```
+
+当节点存在但无任何关系时:
+```json
+{
+  "code": 200,
+  "message": "success",
+  "data": {
+    "node": { "id": 123, "...": "节点属性" },
+    "related_nodes": [],
+    "relationships": []
+  }
+}
+```
+
+## 调用示例
+```http
+POST /node/graph
+Content-Type: application/json
+
+{ "nodeId": 123 }
+```
+
+## 前端接入提示
+- 使用 `POST` 且 `Content-Type: application/json`。
+- `nodeId` 必须为整数;非整数后端会返回错误。
+- 前端可直接用 `data.node` 展示当前节点属性;`data.related_nodes` 渲染侧边列表或标签;`data.relationships` 可用于绘制边。
+- 关系的 `source` 和 `target` 均为 Neo4j 节点 ID,可与 `node` 与 `related_nodes` 对应。
+- 若需区分箭头方向或类型,请使用 `type` 字段做样式映射。
+

+ 4 - 3
scripts/auto_execute_tasks.py

@@ -79,15 +79,16 @@ CHAT_INPUT_POS: Optional[Tuple[int, int]] = None
 # 数据库操作
 # ============================================================================
 def get_db_connection():
-    """获取数据库连接"""
+    """获取数据库连接(使用 production 环境配置)"""
     try:
         import psycopg2
         import sys
 
         sys.path.insert(0, str(WORKSPACE_ROOT))
-        from app.config.config import config, current_env
+        from app.config.config import config
 
-        app_config = config.get(current_env, config['default'])
+        # 强制使用 production 环境的数据库配置
+        app_config = config['production']
         db_uri = app_config.SQLALCHEMY_DATABASE_URI
         return psycopg2.connect(db_uri)
 

+ 12 - 12
scripts/start_task_scheduler.bat

@@ -33,9 +33,9 @@ if not exist "scripts\auto_execute_tasks.py" (
     exit /b 1
 )
 
-REM 检查数据库配置是否存在
-if not exist "mcp-servers\task-manager\config.json" (
-    echo [错误] 未找到数据库配置: mcp-servers\task-manager\config.json
+REM 检查项目配置文件是否存在
+if not exist "app\config\config.py" (
+    echo [错误] 未找到项目配置文件: app\config\config.py
     pause
     exit /b 1
 )
@@ -47,13 +47,13 @@ echo [信息] 当前目录: %cd%
 echo.
 echo 请选择运行模式:
 echo.
-echo   1. 前台运行(可以看到实时日志,按 Ctrl+C 停止)
-echo   2. 后台运行(无窗口,日志输出到 logs\auto_execute.log)
-echo   3. 执行一次(只检查一次 pending 任务)
-echo   4. 前台运行 + 启用自动 Chat
-echo   5. 后台运行 + 启用自动 Chat
+echo   1. 前台运行
+echo   2. 后台运行
+echo   3. 单次执行
+echo   4. 前台运行 + 启用自动Chat
+echo   5. 后台运行 + 启用自动Chat
 echo   6. 查看服务状态
-echo   7. 停止服务
+echo   7. 停止服务
 echo   0. 退出
 echo.
 
@@ -74,7 +74,7 @@ exit /b 1
 
 :run_foreground
 echo.
-echo [启动] 前台运行模式(检查间隔: 5分钟)
+echo [启动] 前台运行模式,检查间隔: 5分钟
 echo [提示] 按 Ctrl+C 可停止服务
 echo.
 python scripts\auto_execute_tasks.py --interval 300
@@ -83,7 +83,7 @@ goto :exit
 
 :run_background
 echo.
-echo [启动] 后台运行模式(检查间隔: 5分钟)
+echo [启动] 后台运行模式,检查间隔: 5分钟
 echo [信息] 日志输出到: logs\auto_execute.log
 start /B "" python scripts\auto_execute_tasks.py --interval 300 > logs\auto_execute.log 2>&1
 echo.
@@ -147,7 +147,7 @@ powershell -Command "$processes = Get-WmiObject Win32_Process | Where-Object { $
 
 echo.
 echo ========================================================
-echo                   最近日志(最后 20 行)
+echo                   最近日志 - 最后 20 行
 echo ========================================================
 echo.
 

+ 20 - 0
test_final.json

@@ -0,0 +1,20 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  ""
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

+ 22 - 0
test_fixed.json

@@ -0,0 +1,22 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  [
+
+                                             ]
+                                 }
+                             ],
+                 "size":  10,
+                 "total":  1
+             },
+    "message":  "操作成功"
+}

+ 20 - 0
test_notexist.json

@@ -0,0 +1,20 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  ""
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

+ 22 - 0
test_result.json

@@ -0,0 +1,22 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  [
+
+                                             ]
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

二进制
test_result.txt


+ 22 - 0
test_result2.json

@@ -0,0 +1,22 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  [
+
+                                             ]
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

+ 20 - 0
test_result3.json

@@ -0,0 +1,20 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  ""
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

+ 22 - 0
test_result_new.json

@@ -0,0 +1,22 @@
+{
+    "code":  200,
+    "data":  {
+                 "current":  1,
+                 "records":  [
+                                 {
+                                     "create_time":  "2025-11-28 14:52:12",
+                                     "data_type":  "CHAR(20)",
+                                     "id":  2259,
+                                     "name_en":  "HISKSDM",
+                                     "name_zh":  "HIS科室代码",
+                                     "status":  true,
+                                     "tag":  [
+
+                                             ]
+                                 }
+                             ],
+                 "size":  1,
+                 "total":  220
+             },
+    "message":  "操作成功"
+}

部分文件因为文件数量过多而无法显示