# vanna_trainer.py
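"""Batch-enabled training helpers for a Vanna instance.

Collects DDL statements, documentation snippets, and question-SQL pairs into
per-type batches and feeds them to the Vanna instance either in bulk (via an
`add_batch` method, when the instance provides one) or item by item through
`vn.train(...)`.
"""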
import os
import time
import threading
import queue
import concurrent.futures
from functools import lru_cache
from collections import defaultdict
from typing import List, Dict, Any, Tuple, Optional, Union, Callable
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import app_config

# Resolve the project root directory path
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Create the Vanna instance
from vanna_llm_factory import create_vanna_instance

vn = create_vanna_instance()

# Read the embedding model name directly from the config file
embedding_model = app_config.EMBEDDING_CONFIG.get('model_name')

print(f"\n===== Embedding model info =====")
print(f"Model name: {embedding_model}")

if hasattr(app_config, 'EMBEDDING_CONFIG'):
    if 'embedding_dimension' in app_config.EMBEDDING_CONFIG:
        print(f"Vector dimension: {app_config.EMBEDDING_CONFIG['embedding_dimension']}")
    if 'base_url' in app_config.EMBEDDING_CONFIG:
        print(f"API service: {app_config.EMBEDDING_CONFIG['base_url']}")

print("==============================")

# Other settings from app_config
BATCH_PROCESSING_ENABLED = app_config.BATCH_PROCESSING_ENABLED
BATCH_SIZE = app_config.BATCH_SIZE
MAX_WORKERS = app_config.MAX_WORKERS


# Batch processor for training data
class BatchProcessor:
    def __init__(self, batch_size=BATCH_SIZE, max_workers=MAX_WORKERS):
        self.batch_size = batch_size
        self.max_workers = max_workers
        self.batches = defaultdict(list)
        self.lock = threading.Lock()  # thread-safety lock

        # Initialize the worker thread pool
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)

        # Whether batch processing is enabled
        self.batch_enabled = BATCH_PROCESSING_ENABLED

        print(f"[DEBUG] BatchProcessor initialized: enabled={self.batch_enabled}, batch_size={self.batch_size}, max_workers={self.max_workers}")

    def add_item(self, batch_type: str, item: Dict[str, Any]):
        """Add an item to the batch queue."""
        if not self.batch_enabled:
            # Batch processing disabled: process the item immediately
            self._process_single_item(batch_type, item)
            return

        with self.lock:
            self.batches[batch_type].append(item)

            if len(self.batches[batch_type]) >= self.batch_size:
                batch_items = self.batches[batch_type]
                self.batches[batch_type] = []
                # Submit the full batch to the thread pool
                self.executor.submit(self._process_batch, batch_type, batch_items)

    def _process_single_item(self, batch_type: str, item: Dict[str, Any]):
        """Process a single item."""
        try:
            if batch_type == 'ddl':
                vn.train(ddl=item['ddl'])
            elif batch_type == 'documentation':
                vn.train(documentation=item['documentation'])
            elif batch_type == 'question_sql':
                vn.train(question=item['question'], sql=item['sql'])

            print(f"[DEBUG] Single item processed: {batch_type}")
        except Exception as e:
            print(f"[ERROR] Failed to process {batch_type} item: {e}")

    def _process_batch(self, batch_type: str, items: List[Dict[str, Any]]):
        """Process a batch of items."""
        print(f"[INFO] Starting batch processing of {len(items)} {batch_type} items")
        start_time = time.time()

        try:
            # Prepare the batch payload
            batch_data = []

            if batch_type == 'ddl':
                for item in items:
                    batch_data.append({
                        'type': 'ddl',
                        'content': item['ddl']
                    })
            elif batch_type == 'documentation':
                for item in items:
                    batch_data.append({
                        'type': 'documentation',
                        'content': item['documentation']
                    })
            elif batch_type == 'question_sql':
                for item in items:
                    batch_data.append({
                        'type': 'question_sql',
                        'question': item['question'],
                        'sql': item['sql']
                    })

            # Use the bulk-add method if the Vanna instance provides one
            if hasattr(vn, 'add_batch') and callable(getattr(vn, 'add_batch')):
                success = vn.add_batch(batch_data)
                if success:
                    print(f"[INFO] Batch processed successfully: {len(items)} {batch_type} items")
                else:
                    print(f"[WARNING] Batch partially failed: {batch_type}")
            else:
                # No bulk-add method available: fall back to per-item processing
                print(f"[WARNING] Batch processing unavailable, processing items one by one: {batch_type}")
                for item in items:
                    self._process_single_item(batch_type, item)
        except Exception as e:
            print(f"[ERROR] Batch processing of {batch_type} failed: {e}")
            # If the batch fails, retry item by item
            print(f"[INFO] Retrying item by item...")
            for item in items:
                try:
                    self._process_single_item(batch_type, item)
                except Exception as item_e:
                    print(f"[ERROR] Failed to process item: {item_e}")

        elapsed = time.time() - start_time
        print(f"[INFO] Finished batch of {len(items)} {batch_type} items in {elapsed:.2f} seconds")

    def flush_all(self):
        """Force-process all remaining queued items."""
        with self.lock:
            for batch_type, items in self.batches.items():
                if items:
                    print(f"[INFO] Processing {len(items)} remaining {batch_type} items")
                    self._process_batch(batch_type, items)

            # Clear the queues
            self.batches = defaultdict(list)

        print("[INFO] All batched items have been processed")

    def shutdown(self):
        """Shut down the processor and its thread pool."""
        self.flush_all()
        self.executor.shutdown(wait=True)
        print("[INFO] BatchProcessor shut down")


# Global batch processor instance
batch_processor = BatchProcessor()


# Batch-enabled versions of the original training functions
def train_ddl(ddl_sql: str):
    print(f"[DDL] Training on DDL:\n{ddl_sql}")
    batch_processor.add_item('ddl', {'ddl': ddl_sql})


def train_documentation(doc: str):
    print(f"[DOC] Training on documentation:\n{doc}")
    batch_processor.add_item('documentation', {'documentation': doc})


def train_sql_example(sql: str):
    """Train on a single SQL example; the matching question is generated from the SQL."""
    print(f"[SQL] Training on SQL:\n{sql}")

    try:
        # Call generate_question directly to produce a question for this SQL
        question = vn.generate_question(sql=sql)

        question = question.strip()
        # Append a question mark if neither a half-width nor a full-width one is present
        if not question.endswith("?") and not question.endswith("？"):
            question += "?"
    except Exception as e:
        print(f"[ERROR] Failed to generate a question: {e}")
        raise Exception(f"Could not generate a question for the SQL: {e}")

    print(f"[SQL] Generated question: {question}")
    # Store the question-SQL pair through the standard path
    batch_processor.add_item('question_sql', {'question': question, 'sql': sql})


def train_question_sql_pair(question: str, sql: str):
    print(f"[Q-S] Training on:\nquestion: {question}\nsql: {sql}")
    batch_processor.add_item('question_sql', {'question': question, 'sql': sql})


# Flush all pending items once training is finished
def flush_training():
    """Force-process all pending training items."""
    batch_processor.flush_all()


# Shut down the trainer
def shutdown_trainer():
    """Shut down the trainer and release related resources."""
    batch_processor.shutdown()
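

# Minimal usage sketch (not part of the original module): it assumes app_config
# enables batching and that create_vanna_instance() is already configured; the
# DDL, documentation, and SQL strings below are hypothetical examples.
if __name__ == "__main__":
    train_ddl("CREATE TABLE users (id INT PRIMARY KEY, name TEXT)")
    train_documentation("The users table stores one row per registered user.")
    train_question_sql_pair(
        question="How many users are there?",
        sql="SELECT COUNT(*) FROM users",
    )
    # Make sure partially filled batches are processed and the thread pool is closed.
    flush_training()
    shutdown_trainer()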