# routes.py

from io import BytesIO, StringIO
import os
import pandas as pd
from flask import request, jsonify, send_file, current_app
from app.api.data_resource import bp
from app.models.result import success, failed
import logging
import json
import re
from minio import Minio
from app.core.graph.graph_operations import MyEncoder
from app.services.neo4j_driver import neo4j_driver
from app.core.data_resource.resource import (
    resource_list,
    handle_node,
    resource_kinship_graph,
    resource_impact_all_graph,
    model_resource_list,
    select_create_ddl,
    data_resource_edit,
    handle_id_resource,
    id_data_search_list,
    table_sql,
    select_sql,
    id_resource_graph
)
from app.core.meta_data import (
    translate_and_parse,
    infer_column_type,
    text_resource_solve,
    get_file_content,
    get_formatted_time
)
import traceback
from app.core.system.auth import require_auth

logger = logging.getLogger("app")

def get_minio_client():
    """Return a MinIO client instance."""
    return Minio(
        current_app.config['MINIO_HOST'],
        access_key=current_app.config['MINIO_USER'],
        secret_key=current_app.config['MINIO_PASSWORD'],
        secure=current_app.config['MINIO_SECURE']
    )


def get_minio_config():
    """Return the MinIO-related configuration."""
    return {
        'bucket_name': current_app.config['BUCKET_NAME'],
        'prefix': current_app.config['PREFIX'],
        'allowed_extensions': current_app.config['ALLOWED_EXTENSIONS']
    }
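
# Illustrative sketch (not part of the original module): how a handler could
# combine the two helpers above to upload an incoming file to MinIO. The
# object-naming scheme below is an assumption for demonstration only.
#
#   client = get_minio_client()
#   cfg = get_minio_config()
#   file = request.files['file']
#   object_name = f"{cfg['prefix']}/{file.filename}"
#   client.put_object(
#       cfg['bucket_name'],
#       object_name,
#       file.stream,
#       length=-1,                   # size unknown: stream in parts
#       part_size=10 * 1024 * 1024,  # required by the MinIO SDK when length is -1
#       content_type=file.content_type,
#   )
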
def is_english(text):
    """Return True if the text contains only English letters, digits and basic punctuation."""
    return text.isascii() and bool(re.match(r'^[a-zA-Z0-9_\s.,;:!?()\'"-]+$', text))
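
# Illustrative behaviour of the check above:
#   is_english("customer_id")  -> True
#   is_english("客户编号")      -> False  (non-ASCII input fails text.isascii())
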
@bp.route('/translate', methods=['POST'])
def data_resource_translate():
    """Translate a data resource name and its column names to English."""
    # Read the form data
    data_resource = request.form.get('data_resource')
    meta_data = request.form.get('meta_data')
    file = request.files.get('file')
    if not data_resource or not meta_data or not file:
        return jsonify(failed("缺少必要参数"))
    meta_data_list = json.loads(meta_data)

    # Build the list of translated column names
    translated_meta_data_list = []
    for meta_item in meta_data_list:
        if is_english(meta_item):
            # Already English: keep as-is
            translated_meta_data_list.append(meta_item)
        else:
            # Otherwise translate before appending
            translated_meta_data_list.append(translate_and_parse(meta_item)[0])

    # Translate the resource name itself
    translated_data_resource = translate_and_parse(data_resource)
    if translated_data_resource and len(translated_data_resource) > 0:
        translated_data_resource = translated_data_resource[0]
    else:
        # Fall back to the original value if translation fails
        translated_data_resource = data_resource

    try:
        # Assemble the final translation result
        # meta_en = translated_meta_data_list
        resource = {"name": data_resource, "en_name": translated_data_resource}
        parsed_data = []

        # Read the file content, then reset the file pointer
        file_content = file.read()
        file.seek(0)
        try:
            df = pd.read_excel(BytesIO(file_content))
        except Exception as e:
            return jsonify(failed(f"文件格式错误: {str(e)}"))

        # Infer the data type of each column
        columns_and_types = infer_column_type(df)
        for i in range(len(meta_data_list)):
            zh = meta_data_list[i]
            en = translated_meta_data_list[i]
            data_type = columns_and_types[i] if i < len(columns_and_types) else "varchar(255)"
            parsed_item = {"name": zh, "en_name": en, "data_type": data_type}
            parsed_data.append(parsed_item)

        response_data = {
            "head_data": parsed_data,
            "data_resource": resource
        }
        return jsonify(success(response_data, "success"))
    except Exception as e:
        return jsonify(failed({}, str(e)))
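
# Illustrative client call for /translate (not part of the original module).
# The URL prefix depends on where the blueprint is registered; the path shown
# here is an assumption.
#
#   import json, requests
#   resp = requests.post(
#       "http://localhost:5000/api/resource/translate",
#       data={
#           "data_resource": "客户信息",
#           "meta_data": json.dumps(["客户名称", "注册日期"]),
#       },
#       files={"file": open("customers.xlsx", "rb")},
#   )
#   resp.json()  # success envelope wrapping {"head_data": [...], "data_resource": {...}}
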
@bp.route('/save', methods=['POST'])
def data_resource_save():
    """Save a data resource."""
    try:
        # Read the JSON body
        # If the payload wraps everything in a 'receiver' key, use instead:
        # receiver = request.json.get('receiver', {})
        receiver = request.get_json()

        # Check that receiver exists
        if not receiver:
            return jsonify(failed("参数不完整:缺少receiver"))
        # Check that the URL exists
        if 'url' not in receiver:
            return jsonify(failed("参数不完整:缺少url"))

        additional_info = receiver['additional_info']
        file_extension = receiver['url'].split('.')[-1]
        if file_extension in ('xlsx', 'xls', 'csv'):
            head_data = additional_info['head_data']
            data_resource = additional_info['data_resource']
            if not data_resource:
                return jsonify(failed("参数不完整"))
            # Delegate creation of the data resource to the business layer
            resource_id = handle_node(receiver, head_data, data_resource)
        else:
            return jsonify(failed("文件格式错误"))
        return jsonify(success({"id": resource_id}))
    except Exception as e:
        logger.error(f"保存数据资源失败: {str(e)}")
        return jsonify(failed(str(e)))
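
# Illustrative /save request body (shape inferred from the reads above; the
# concrete values are invented, and the head_data entries mirror the output
# of /translate):
#
#   {
#       "url": "datasets/customers.xlsx",
#       "additional_info": {
#           "head_data": [
#               {"name": "客户名称", "en_name": "customer_name", "data_type": "varchar(255)"}
#           ],
#           "data_resource": {"name": "客户信息", "en_name": "customer_info"}
#       }
#   }
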
@bp.route('/delete', methods=['POST'])
def data_resource_delete():
    """Delete a data resource."""
    try:
        # Read the resource ID
        resource_id = request.json.get('id')
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        with neo4j_driver.get_session() as session:
            # Delete the data_resource node together with its relationships
            cypher = """
            MATCH (n:data_resource)
            WHERE id(n) = $resource_id
            DETACH DELETE n
            """
            session.run(cypher, resource_id=int(resource_id))
        return jsonify(success({"message": "数据资源删除成功"}))
    except Exception as e:
        logger.error(f"删除数据资源失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/update', methods=['POST'])
def data_resource_update():
    """Update a data resource."""
    try:
        # Read the update payload
        data = request.json
        if not data or "id" not in data:
            return jsonify(failed("参数不完整"))
        # Delegate the update to the business layer
        updated_data = data_resource_edit(data)
        return jsonify(success(updated_data))
    except Exception as e:
        logger.error(f"更新数据资源失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/ddl', methods=['POST'])
def id_data_ddl():
    """Parse the DDL of a data resource."""
    try:
        # Read the SQL content
        sql_content = request.json.get('sql', '')
        if not sql_content:
            return jsonify(failed("SQL内容不能为空"))
        # Log the raw SQL for debugging
        logger.debug(f"原始SQL: {sql_content}")

        # Extract the CREATE TABLE statements
        create_ddl_list = select_create_ddl(sql_content)
        if not create_ddl_list:
            return jsonify(failed("未找到有效的CREATE TABLE语句"))

        # Parse each table definition
        tables_dict = {}  # the dict of tables returned to the caller
        for ddl in create_ddl_list:
            table_info = table_sql(ddl)
            if table_info:
                # table_info shape: {"table_name": {"exist": bool, "meta": [...], "table_comment": "..."}}
                # Merge it into the result dict
                tables_dict.update(table_info)
        if not tables_dict:
            return jsonify(failed("解析表结构失败"))

        # Log and return the parse result directly
        logger.debug(f"解析结果: {json.dumps(tables_dict, ensure_ascii=False)}")
        return jsonify(success(tables_dict))
    except Exception as e:
        logger.error(f"解析DDL失败: {str(e)}")
        logger.error(traceback.format_exc())  # include the full stack trace
        return jsonify(failed(str(e)))
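
# Illustrative /ddl result payload, following the table_info shape noted in
# the handler above (the table name and comment are invented examples):
#
#   {
#       "customer": {
#           "exist": false,
#           "meta": [...],
#           "table_comment": "客户表"
#       }
#   }
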
@bp.route('/list', methods=['POST'])
def data_resource_list():
    """List data resources."""
    try:
        # Read pagination and filter parameters
        page = int(request.json.get('current', 1))
        page_size = int(request.json.get('size', 10))
        en_name_filter = request.json.get('en_name')
        name_filter = request.json.get('name')
        type_filter = request.json.get('type', 'all')
        category_filter = request.json.get('category')
        tag_filter = request.json.get('tag')

        # Query the resource list through the business layer
        resources, total_count = resource_list(
            page,
            page_size,
            en_name_filter,
            name_filter,
            type_filter,
            category_filter,
            tag_filter
        )

        # Return the paginated result
        return jsonify(success({
            "records": resources,
            "total": total_count,
            "size": page_size,
            "current": page
        }))
    except Exception as e:
        logger.error(f"获取数据资源列表失败: {str(e)}")
        return jsonify(failed(str(e)))
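
# Illustrative /list request body (keys match the reads above; values invented):
#
#   {"current": 1, "size": 10, "name": "客户", "en_name": null,
#    "type": "all", "category": null, "tag": null}
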
@bp.route('/search', methods=['POST'])
def id_data_search():
    """Search the metadata associated with a data resource."""
    try:
        # Read the parameters
        resource_id = request.json.get('id')
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        page = int(request.json.get('current', 1))
        page_size = int(request.json.get('size', 10))
        en_name_filter = request.json.get('en_name')
        name_filter = request.json.get('name')
        category_filter = request.json.get('category')
        tag_filter = request.json.get('tag')

        # Query the associated metadata through the business layer
        metadata_list, total_count = id_data_search_list(
            resource_id,
            page,
            page_size,
            en_name_filter,
            name_filter,
            category_filter,
            tag_filter
        )

        # Return the paginated result
        return jsonify(success({
            "records": metadata_list,
            "total": total_count,
            "size": page_size,
            "current": page
        }))
    except Exception as e:
        logger.error(f"搜索数据资源关联的元数据失败: {str(e)}")
        return jsonify(failed(str(e)))
def dynamic_type_conversion(value, target_type):
    """Convert a value to the requested target type."""
    if value is None:
        return None
    if target_type in ("int", "INT"):
        return int(value)
    elif target_type in ("float", "FLOAT", "double", "DOUBLE"):
        return float(value)
    elif target_type in ("bool", "BOOL", "boolean", "BOOLEAN"):
        if isinstance(value, str):
            return value.lower() in ('true', 'yes', '1', 't', 'y')
        return bool(value)
    else:
        return str(value)
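
# Illustrative behaviour of the converter above:
#   dynamic_type_conversion("42", "INT")      -> 42
#   dynamic_type_conversion("yes", "boolean") -> True
#   dynamic_type_conversion(3.14, "varchar")  -> "3.14"
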
@bp.route('/graph/all', methods=['POST'])
def data_resource_graph_all():
    """Return the full graph for a data resource."""
    try:
        # Read the parameters
        resource_id = request.json.get('id')
        meta = request.json.get('meta', True)
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        # Build the graph through the business layer
        graph_data = resource_impact_all_graph(resource_id, meta)
        return jsonify(success(graph_data))
    except Exception as e:
        logger.error(f"获取数据资源完整图谱失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/graph', methods=['POST'])
def data_resource_list_graph():
    """Return the kinship graph for a data resource."""
    try:
        # Read the parameters
        resource_id = request.json.get('id')
        meta = request.json.get('meta', True)
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        # Build the graph through the business layer
        graph_data = resource_kinship_graph(resource_id, meta)
        return jsonify(success(graph_data))
    except Exception as e:
        logger.error(f"获取数据资源亲缘关系图谱失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/save/metadata', methods=['POST'])
def id_data_save():
    """Save the metadata associated with a data resource."""
    try:
        # Read the parameters
        resource_id = request.json.get('id')
        metadata_list = request.json.get('data', [])
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        if not metadata_list:
            return jsonify(failed("元数据列表不能为空"))
        # Persist the metadata
        with neo4j_driver.get_session() as session:
            # Remove the existing relationships first
            cypher_delete = """
            MATCH (n:data_resource)-[r:contain]->()
            WHERE id(n) = $resource_id
            DELETE r
            """
            session.run(cypher_delete, resource_id=int(resource_id))
            # Create the new relationships
            for meta in metadata_list:
                # Create or fetch the metadata node
                meta_cypher = """
                MERGE (m:Metadata {name: $name})
                ON CREATE SET m.en_name = $en_name, m.createTime = $create_time
                RETURN m
                """
                meta_result = session.run(
                    meta_cypher,
                    name=meta["name"],
                    en_name=meta["en_name"],
                    create_time=meta.get("createTime", get_formatted_time())
                )
                meta_node = meta_result.single()["m"]
                # Link the resource to the metadata node
                rel_cypher = """
                MATCH (n:data_resource), (m:Metadata)
                WHERE id(n) = $resource_id AND id(m) = $meta_id
                CREATE (n)-[r:contain]->(m)
                RETURN r
                """
                session.run(
                    rel_cypher,
                    resource_id=int(resource_id),
                    meta_id=meta_node.id
                )
        return jsonify(success({"message": "元数据保存成功"}))
    except Exception as e:
        logger.error(f"保存数据资源关联的元数据失败: {str(e)}")
        return jsonify(failed(str(e)))
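
# Illustrative /save/metadata request body (keys match the reads above;
# values invented):
#
#   {
#       "id": 123,
#       "data": [
#           {"name": "客户名称", "en_name": "customer_name"},
#           {"name": "注册日期", "en_name": "register_date"}
#       ]
#   }
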
@bp.route('/sql/test', methods=['POST'])
def sql_test():
    """Test-parse a SQL query."""
    try:
        # Read the parameters
        sql_query = request.json.get('sql', '')
        if not sql_query:
            return jsonify(failed("SQL查询不能为空"))
        # Parse the SQL
        parsed_sql = select_sql(sql_query)
        if not parsed_sql:
            return jsonify(failed("解析SQL失败"))
        # Return the parse result
        return jsonify(success(parsed_sql))
    except Exception as e:
        logger.error(f"测试SQL查询失败: {str(e)}")
        return jsonify(failed(str(e)))
# Deprecated DDL-recognition endpoint: similar to the /ddl API above, but with
# simplified functionality.
@bp.route('/ddl/identify', methods=['POST'])
def sql_ddl_identify():
    """Identify DDL statements."""
    try:
        # Read the parameters
        sql_content = request.json.get('sql', '')
        if not sql_content:
            return jsonify(failed("SQL内容不能为空"))
        # Extract the CREATE TABLE statements
        create_ddl_list = select_create_ddl(sql_content)
        if not create_ddl_list:
            return jsonify(failed("未找到有效的CREATE TABLE语句"))
        return jsonify(success({"count": len(create_ddl_list)}))
    except Exception as e:
        logger.error(f"识别DDL语句失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/model/list', methods=['POST'])
def resource_model_list():
    """List model resources."""
    try:
        # Read pagination and filter parameters
        page = int(request.json.get('current', 1))
        page_size = int(request.json.get('size', 10))
        name_filter = request.json.get('name')
        # Query the model resource list through the business layer
        resources, total_count = model_resource_list(page, page_size, name_filter)
        # Return the paginated result
        return jsonify(success({
            "records": resources,
            "total": total_count,
            "size": page_size,
            "current": page
        }))
    except Exception as e:
        logger.error(f"获取模型资源列表失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/detail', methods=['POST'])
def data_resource_detail():
    """Return the details of a data resource."""
    try:
        # Read the resource ID
        resource_id = request.json.get('id')
        if not resource_id:
            return jsonify(failed("资源ID不能为空"))
        # Query the resource details through the business layer
        resource_data = handle_id_resource(resource_id)
        if not resource_data:
            return jsonify(failed("资源不存在"))
        return jsonify(success(resource_data))
    except Exception as e:
        logger.error(f"获取数据资源详情失败: {str(e)}")
        return jsonify(failed(str(e)))
@bp.route('/config', methods=['GET'])
@require_auth
def get_resource_config():
    """Return the data-resource configuration."""
    config = get_minio_config()
    return jsonify({
        'allowed_extensions': list(config['allowed_extensions']),
        'bucket_name': config['bucket_name'],
        'prefix': config['prefix']
    })
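
# Illustrative /config response (keys mirror the dict built above; the values
# depend on the app's MinIO configuration and are invented here):
#
#   {"allowed_extensions": ["xlsx", "xls", "csv"], "bucket_name": "dataops", "prefix": "resource"}
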
  441. """解析表定义SQL,支持带schema和不带schema两种格式"""
  442. try:
  443. # 支持以下格式:
  444. # 1. CREATE TABLE tablename
  445. # 2. CREATE TABLE "tablename"
  446. # 3. CREATE TABLE schema.tablename
  447. # 4. CREATE TABLE "schema"."tablename"
  448. table_name_pattern = r'CREATE\s+TABLE\s+(?:(?:"([^"]+)"|([^"\s\.]+))\.)?(?:"([^"]+)"|([^"\s\(]+))'
  449. table_name_match = re.search(table_name_pattern, sql, re.IGNORECASE)
  450. if not table_name_match:
  451. return None
  452. # 获取表名,优先使用带引号的名称,如果没有则使用不带引号的
  453. schema = table_name_match.group(1) or table_name_match.group(2) # schema是可选的
  454. table_name = table_name_match.group(3) or table_name_match.group(4) # 实际表名
  455. # 提取字段定义
  456. fields_pattern = r'CREATE\s+TABLE[^(]*\(\s*(.*?)\s*\)'
  457. fields_match = re.search(fields_pattern, sql, re.DOTALL | re.IGNORECASE)
  458. if not fields_match:
  459. return None
  460. fields_text = fields_match.group(1)
  461. # 分割字段定义
  462. field_definitions = []
  463. in_parenthesis = 0
  464. current_field = ""
  465. for char in fields_text:
  466. if char == '(':
  467. in_parenthesis += 1
  468. current_field += char
  469. elif char == ')':
  470. in_parenthesis -= 1
  471. current_field += char
  472. elif char == ',' and in_parenthesis == 0:
  473. field_definitions.append(current_field.strip())
  474. current_field = ""
  475. else:
  476. current_field += char
  477. if current_field.strip():
  478. field_definitions.append(current_field.strip())
  479. # 解析每个字段
  480. fields = []
  481. primary_keys = []
  482. for field_def in field_definitions:
  483. # 忽略PRIMARY KEY等约束定义
  484. if re.match(r'^\s*(?:PRIMARY|UNIQUE|FOREIGN|CHECK|CONSTRAINT)\s+', field_def, re.IGNORECASE):
  485. # 提取主键字段
  486. pk_pattern = r'PRIMARY\s+KEY\s*\(\s*(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))\s*\)'
  487. pk_match = re.search(pk_pattern, field_def, re.IGNORECASE)
  488. if pk_match:
  489. pk = next((g for g in pk_match.groups() if g is not None), "")
  490. primary_keys.append(pk)
  491. continue
  492. # 解析常规字段定义
  493. field_pattern = r'^\s*(?:`([^`]+)`|"([^"]+)"|\'([^\']+)\'|([a-zA-Z0-9_]+))\s+([A-Za-z0-9_]+(?:\s*\([^)]*\))?)'
  494. field_match = re.search(field_pattern, field_def)
  495. if field_match:
  496. # 提取字段名和类型
  497. field_name = next((g for g in field_match.groups()[:4] if g is not None), "")
  498. field_type = field_match.group(5)
  499. # 检查是否为主键
  500. is_primary = "PRIMARY KEY" in field_def.upper()
  501. if is_primary:
  502. primary_keys.append(field_name)
  503. # 检查是否为非空
  504. not_null = "NOT NULL" in field_def.upper()
  505. # 检查默认值
  506. default_match = re.search(r'DEFAULT\s+([^,\s]+)', field_def, re.IGNORECASE)
  507. default_value = default_match.group(1) if default_match else None
  508. # 添加字段信息
  509. field_info = {
  510. "name": field_name,
  511. "type": clean_type(field_type),
  512. "is_primary": is_primary,
  513. "not_null": not_null
  514. }
  515. if default_value:
  516. field_info["default"] = default_value
  517. fields.append(field_info)
  518. # 更新主键标记
  519. for field in fields:
  520. if field["name"] in primary_keys and not field["is_primary"]:
  521. field["is_primary"] = True
  522. # 返回结果,包含schema信息
  523. result = {
  524. "table_name": table_name,
  525. "fields": fields
  526. }
  527. # 如果有schema,添加到结果中
  528. if schema:
  529. result["schema"] = schema
  530. return result
  531. except Exception as e:
  532. logger.error(f"解析表定义SQL失败: {str(e)}")
  533. return None
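
# Illustrative usage of the reconstructed parser above (invented DDL; the
# `type` values come from the undefined clean_type helper):
#
#   ddl = "CREATE TABLE ods.customer (id INT PRIMARY KEY, name TEXT NOT NULL)"
#   parse_table_definition(ddl)
#   # -> {"table_name": "customer", "schema": "ods",
#   #     "fields": [
#   #         {"name": "id",   "type": clean_type("INT"),  "is_primary": True,  "not_null": False},
#   #         {"name": "name", "type": clean_type("TEXT"), "is_primary": False, "not_null": True},
#   #     ]}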