# vanna_trainer.py
import os
import time
import threading
import queue
import concurrent.futures
from functools import lru_cache
from collections import defaultdict
from typing import List, Dict, Any, Tuple, Optional, Union, Callable
import sys

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import app_config

# Resolve the project root directory
project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Create the vanna instance
from vanna_llm_factory import create_vanna_instance
vn = create_vanna_instance()

# Read the embedding model name straight from the config file
embedding_model = app_config.EMBEDDING_CONFIG.get('model_name')
print(f"\n===== Embedding model info =====")
print(f"Model name: {embedding_model}")
if hasattr(app_config, 'EMBEDDING_CONFIG'):
    if 'embedding_dimension' in app_config.EMBEDDING_CONFIG:
        print(f"Vector dimension: {app_config.EMBEDDING_CONFIG['embedding_dimension']}")
    if 'base_url' in app_config.EMBEDDING_CONFIG:
        print(f"API endpoint: {app_config.EMBEDDING_CONFIG['base_url']}")
print("==============================")
# Training batch-processing settings from app_config
BATCH_PROCESSING_ENABLED = app_config.TRAINING_BATCH_PROCESSING_ENABLED
BATCH_SIZE = app_config.TRAINING_BATCH_SIZE
MAX_WORKERS = app_config.TRAINING_MAX_WORKERS


# Training-data batch processor.
# Groups multiple training items together so they can be processed more efficiently.
class BatchProcessor:
    def __init__(self, batch_size=BATCH_SIZE, max_workers=MAX_WORKERS):
        self.batch_size = batch_size
        self.max_workers = max_workers
        self.batches = defaultdict(list)
        self.lock = threading.Lock()  # Thread-safety lock
        # Initialize the worker thread pool
        self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
        # Whether batching is enabled
        self.batch_enabled = BATCH_PROCESSING_ENABLED
        print(f"[DEBUG] Training batch processor initialized: enabled={self.batch_enabled}, batch_size={self.batch_size}, max_workers={self.max_workers}")
    def add_item(self, batch_type: str, item: Dict[str, Any]):
        """Add an item to the batch queue."""
        if not self.batch_enabled:
            # Batching disabled: process the item immediately
            self._process_single_item(batch_type, item)
            return

        with self.lock:
            self.batches[batch_type].append(item)

            if len(self.batches[batch_type]) >= self.batch_size:
                batch_items = self.batches[batch_type]
                self.batches[batch_type] = []
                # Submit the full batch to the thread pool
                self.executor.submit(self._process_batch, batch_type, batch_items)
    def _process_single_item(self, batch_type: str, item: Dict[str, Any]):
        """Process a single item."""
        try:
            if batch_type == 'ddl':
                vn.train(ddl=item['ddl'])
            elif batch_type == 'documentation':
                vn.train(documentation=item['documentation'])
            elif batch_type == 'question_sql':
                vn.train(question=item['question'], sql=item['sql'])

            print(f"[DEBUG] Single item processed: {batch_type}")
        except Exception as e:
            print(f"[ERROR] Failed to process {batch_type} item: {e}")
    def _process_batch(self, batch_type: str, items: List[Dict[str, Any]]):
        """Process a batch of items."""
        print(f"[INFO] Starting batch processing of {len(items)} {batch_type} items")
        start_time = time.time()

        try:
            # Prepare the batch payload
            batch_data = []

            if batch_type == 'ddl':
                for item in items:
                    batch_data.append({
                        'type': 'ddl',
                        'content': item['ddl']
                    })
            elif batch_type == 'documentation':
                for item in items:
                    batch_data.append({
                        'type': 'documentation',
                        'content': item['documentation']
                    })
            elif batch_type == 'question_sql':
                for item in items:
                    batch_data.append({
                        'type': 'question_sql',
                        'question': item['question'],
                        'sql': item['sql']
                    })

            # Use the bulk add method when the vanna instance provides one
            if hasattr(vn, 'add_batch') and callable(getattr(vn, 'add_batch')):
                success = vn.add_batch(batch_data)
                if success:
                    print(f"[INFO] Batch processed successfully: {len(items)} {batch_type} items")
                else:
                    print(f"[WARNING] Batch partially failed: {batch_type}")
            else:
                # No bulk method available: fall back to item-by-item processing
                print(f"[WARNING] Batch method unavailable, processing items one by one: {batch_type}")
                for item in items:
                    self._process_single_item(batch_type, item)
        except Exception as e:
            print(f"[ERROR] Batch processing of {batch_type} failed: {e}")
            # If the batch fails, retry the items one by one
            print(f"[INFO] Retrying items one by one...")
            for item in items:
                try:
                    self._process_single_item(batch_type, item)
                except Exception as item_e:
                    print(f"[ERROR] Failed to process item: {item_e}")

        elapsed = time.time() - start_time
        print(f"[INFO] Batch of {len(items)} {batch_type} items finished in {elapsed:.2f} s")
    def flush_all(self):
        """Force-process all remaining queued items."""
        with self.lock:
            for batch_type, items in self.batches.items():
                if items:
                    print(f"[INFO] Processing remaining {len(items)} {batch_type} items")
                    self._process_batch(batch_type, items)

            # Clear the queues
            self.batches = defaultdict(list)

        print("[INFO] All pending training batches have been processed")

    def shutdown(self):
        """Shut down the processor and its thread pool."""
        self.flush_all()
        self.executor.shutdown(wait=True)
        print("[INFO] Training batch processor shut down")
# Global training batch processor instance,
# shared by all training functions below
batch_processor = BatchProcessor()


# Batch-enabled versions of the original training functions
def train_ddl(ddl_sql: str):
    print(f"[DDL] Training on DDL:\n{ddl_sql}")
    batch_processor.add_item('ddl', {'ddl': ddl_sql})


def train_documentation(doc: str):
    print(f"[DOC] Training on documentation:\n{doc}")
    batch_processor.add_item('documentation', {'documentation': doc})


def train_sql_example(sql: str):
    """Train on a single SQL example by generating a matching question for it."""
    print(f"[SQL] Training on SQL:\n{sql}")

    try:
        # Call generate_question directly
        question = vn.generate_question(sql=sql)
        question = question.strip()
        if not question.endswith("?") and not question.endswith("？"):
            question += "?"
    except Exception as e:
        print(f"[ERROR] Failed to generate a question: {e}")
        raise Exception(f"Could not generate a question for the SQL: {e}")

    print(f"[SQL] Generated question: {question}")
    # Store the question-SQL pair in the standard way
    batch_processor.add_item('question_sql', {'question': question, 'sql': sql})


def train_question_sql_pair(question: str, sql: str):
    print(f"[Q-S] Training on:\nquestion: {question}\nsql: {sql}")
    batch_processor.add_item('question_sql', {'question': question, 'sql': sql})
# Flush all pending items once training is finished
def flush_training():
    """Force-process all pending training items."""
    batch_processor.flush_all()


# Shut down the trainer
def shutdown_trainer():
    """Shut down the trainer and release its resources."""
    batch_processor.shutdown()
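

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): the DDL and question/SQL strings below are
# made-up examples, not part of the module's training data; they simply show
# the intended call order: queue items, flush the pending batches, then shut
# the worker pool down.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Queue one DDL statement and one question/SQL pair for training
    train_ddl("CREATE TABLE users (id INT PRIMARY KEY, name TEXT);")
    train_question_sql_pair(
        "How many users are registered?",
        "SELECT COUNT(*) FROM users;",
    )

    # Force any partially filled batches through, then release the thread pool
    flush_training()
    shutdown_trainer()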