
Removed the related test code.

wangxq, 1 week ago
Parent
Commit
d21d87cd3a
62 files changed, 0 insertions and 17,957 deletions
  1. +0 -864    test/4. LangGraph 实现自治循环代理(ReAct)及事件流的应用.ipynb
  2. +0 -1      test/__init__.py
  3. +0 -289    test/agent_old.py
  4. +0 -1      test/custom_react_agent/__init__.py
  5. +0 -1442   test/custom_react_agent/agent.py
  6. +0 -976    test/custom_react_agent/api.py
  7. +0 -14     test/custom_react_agent/asgi_app.py
  8. +0 -262    test/custom_react_agent/bak/simple_redis_api.py
  9. +0 -158    test/custom_react_agent/bak/simple_redis_query.py
  10. +0 -45    test/custom_react_agent/config.py
  11. +0 -20    test/custom_react_agent/db_query_decision_prompt.txt
  12. +0 -170   test/custom_react_agent/doc/ASGI_启动说明.md
  13. +0 -159   test/custom_react_agent/doc/FLASK_MIGRATION.md
  14. +0 -102   test/custom_react_agent/doc/MIGRATION_COMPLETE.md
  15. +0 -281   test/custom_react_agent/doc/QUICKSTART.md
  16. +0 -205   test/custom_react_agent/doc/README_API.md
  17. +0 -31    test/custom_react_agent/doc/README_valid_sql_test.md
  18. +0 -296   test/custom_react_agent/doc/agent.py.backup
  19. +0 -345   test/custom_react_agent/doc/api_design.md
  20. +0 -109   test/custom_react_agent/doc/community_help_request.md
  21. +0 -1     test/custom_react_agent/doc/network_troubleshooting.md
  22. +0 -112   test/custom_react_agent/doc/redesign_summary.md
  23. +0 -114   test/custom_react_agent/doc/result 输出结果.md
  24. +0 -1246  test/custom_react_agent/doc/两个关键问题.md
  25. +0 -31    test/custom_react_agent/doc/修改默认用户.md
  26. +0 -97    test/custom_react_agent/doc/参考方案.md
  27. +0 -107   test/custom_react_agent/doc/增强valid()验证.md
  28. +0 -144   test/custom_react_agent/doc/异步改造建议参考.md
  29. +0 -844   test/custom_react_agent/doc/异步改造方案.md
  30. +0 -33    test/custom_react_agent/doc/独立测试说明.md
  31. +0 -495   test/custom_react_agent/enhanced_redis_api.py
  32. +0 -20    test/custom_react_agent/requirements.txt
  33. +0 -301   test/custom_react_agent/shell.py
  34. +0 -282   test/custom_react_agent/sql_tools.py
  35. +0 -21    test/custom_react_agent/state.py
  36. +0 -152   test/custom_react_agent/test/test_api.py
  37. +0 -198   test/custom_react_agent/test/test_api_design.py
  38. +0 -116   test/custom_react_agent/test/test_api_modifications.py
  39. +0 -97    test/custom_react_agent/test/test_asgi_setup.py
  40. +0 -234   test/custom_react_agent/test/test_conversation_api.py
  41. +0 -53    test/custom_react_agent/test/test_fix.py
  42. +0 -76    test/custom_react_agent/test/test_redis_simple.py
  43. +0 -71    test/custom_react_agent/test/test_retry_logic.py
  44. +0 -81    test/custom_react_agent/test/test_shell_features.py
  45. +0 -26    test/custom_react_agent/test/test_simple_api.py
  46. +0 -173   test/custom_react_agent/test/test_valid_sql_simple.py
  47. +0 -179   test/custom_react_agent/test/test_valid_sql_standalone.py
  48. +0 -209   test/custom_react_agent/test/test_validation_improvements.py
  49. +0 -255   test/redis_conversation_demo.py
  50. +0 -293   test/test_ask_agent_redis_integration.py
  51. +0 -106   test/test_config_refactor.py
  52. +0 -128   test/test_config_utils.py
  53. +0 -225   test/test_ollama_integration.py
  54. +0 -283   test/test_redis_conversation_manager.py
  55. +0 -111   test/test_redis_fix_validation.py
  56. +0 -94    test/test_routing_modes.py
  57. +0 -146   test/test_thinking_control.py
  58. +0 -294   test/test_training_integration.py
  59. +0 -235   test/test_vanna_combinations.py
  60. +0 -103   test/test_vanna_singleton.py
  61. +0 -4263  test/vanna_agent_test.ipynb
  62. +0 -138   test/vanna_test.ipynb

File diff suppressed because it is too large
+ 0 - 864
test/4. LangGraph 实现自治循环代理(ReAct)及事件流的应用.ipynb


+ 0 - 1
test/__init__.py

@@ -1 +0,0 @@
-# This file makes the 'test' directory a Python package. 

+ 0 - 289
test/agent_old.py

@@ -1,289 +0,0 @@
-"""
-基于 StateGraph 的、具备上下文感知能力的 React Agent 核心实现
-"""
-import logging
-import json
-import pandas as pd
-from typing import List, Optional, Dict, Any, Tuple
-from contextlib import AsyncExitStack
-
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage, ToolMessage, BaseMessage
-from langgraph.graph import StateGraph, END
-from langgraph.prebuilt import ToolNode
-from redis.asyncio import Redis
-try:
-    from langgraph.checkpoint.redis import AsyncRedisSaver
-except ImportError:
-    AsyncRedisSaver = None
-
-# 从新模块导入配置、状态和工具
-from . import config
-from .state import AgentState
-from .sql_tools import sql_tools
-
-logger = logging.getLogger(__name__)
-
-class CustomReactAgent:
-    """
-    一个使用 StateGraph 构建的、具备上下文感知和持久化能力的 Agent。
-    """
-    def __init__(self):
-        """私有构造函数,请使用 create() 类方法来创建实例。"""
-        self.llm = None
-        self.tools = None
-        self.agent_executor = None
-        self.checkpointer = None
-        self._exit_stack = None
-
-    @classmethod
-    async def create(cls):
-        """异步工厂方法,创建并初始化 CustomReactAgent 实例。"""
-        instance = cls()
-        await instance._async_init()
-        return instance
-
-    async def _async_init(self):
-        """异步初始化所有组件。"""
-        logger.info("🚀 开始初始化 CustomReactAgent...")
-
-        # 1. 初始化 LLM
-        self.llm = ChatOpenAI(
-            api_key=config.QWEN_API_KEY,
-            base_url=config.QWEN_BASE_URL,
-            model=config.QWEN_MODEL,
-            temperature=0.1,
-            model_kwargs={
-                "extra_body": {
-                    "enable_thinking": False,
-                    "misc": {
-                        "ensure_ascii": False
-                    }
-                }
-            }
-        )
-        logger.info(f"   LLM 已初始化,模型: {config.QWEN_MODEL}")
-
-        # 2. 绑定工具
-        self.tools = sql_tools
-        self.llm_with_tools = self.llm.bind_tools(self.tools)
-        logger.info(f"   已绑定 {len(self.tools)} 个工具。")
-
-        # 3. 初始化 Redis Checkpointer
-        if config.REDIS_ENABLED and AsyncRedisSaver is not None:
-            try:
-                self._exit_stack = AsyncExitStack()
-                checkpointer_manager = AsyncRedisSaver.from_conn_string(config.REDIS_URL)
-                self.checkpointer = await self._exit_stack.enter_async_context(checkpointer_manager)
-                await self.checkpointer.asetup()
-                logger.info(f"   AsyncRedisSaver 持久化已启用: {config.REDIS_URL}")
-            except Exception as e:
-                logger.error(f"   ❌ RedisSaver 初始化失败: {e}", exc_info=True)
-                if self._exit_stack:
-                    await self._exit_stack.aclose()
-                self.checkpointer = None
-        else:
-            logger.warning("   Redis 持久化功能已禁用。")
-
-        # 4. 构建 StateGraph
-        self.agent_executor = self._create_graph()
-        logger.info("   StateGraph 已构建并编译。")
-        logger.info("✅ CustomReactAgent 初始化完成。")
-
-    async def close(self):
-        """清理资源,关闭 Redis 连接。"""
-        if self._exit_stack:
-            await self._exit_stack.aclose()
-            self._exit_stack = None
-            self.checkpointer = None
-            logger.info("✅ RedisSaver 资源已通过 AsyncExitStack 释放。")
-
-    def _create_graph(self):
-        """定义并编译 StateGraph。"""
-        builder = StateGraph(AgentState)
-
-        # 定义节点
-        builder.add_node("agent", self._agent_node)
-        builder.add_node("prepare_tool_input", self._prepare_tool_input_node)
-        builder.add_node("tools", ToolNode(self.tools))
-        builder.add_node("update_state_after_tool", self._update_state_after_tool_node)
-        builder.add_node("format_final_response", self._format_final_response_node)
-
-        # 定义边
-        builder.set_entry_point("agent")
-        builder.add_conditional_edges(
-            "agent",
-            self._should_continue,
-            {
-                "continue": "prepare_tool_input",
-                "end": "format_final_response"
-            }
-        )
-        builder.add_edge("prepare_tool_input", "tools")
-        builder.add_edge("tools", "update_state_after_tool")
-        builder.add_edge("update_state_after_tool", "agent")
-        builder.add_edge("format_final_response", END)
-
-        # 编译图,并传入 checkpointer
-        return builder.compile(checkpointer=self.checkpointer)
-
-    def _should_continue(self, state: AgentState) -> str:
-        """判断是继续调用工具还是结束。"""
-        last_message = state["messages"][-1]
-        if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
-            return "end"
-        return "continue"
-
-    def _agent_node(self, state: AgentState) -> Dict[str, Any]:
-        """Agent 节点:调用 LLM 进行思考和决策。"""
-        logger.info(f"🧠 [Node] agent - Thread: {state['thread_id']}")
-        
-        messages_for_llm = list(state["messages"])
-        if state.get("suggested_next_step"):
-            instruction = f"基于之前的步骤,强烈建议你下一步执行 '{state['suggested_next_step']}' 操作。"
-            # 为了避免污染历史,可以考虑不同的注入方式,但这里为了简单直接添加
-            messages_for_llm.append(HumanMessage(content=instruction, name="system_instruction"))
-
-        response = self.llm_with_tools.invoke(messages_for_llm)
-        logger.info(f"   LLM 返回: {response.pretty_print()}")
-        return {"messages": [response]}
-    
-    def _prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-        """信息组装节点:为需要上下文的工具注入历史消息。"""
-        logger.info(f"🛠️ [Node] prepare_tool_input - Thread: {state['thread_id']}")
-        
-        last_message = state["messages"][-1]
-        if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
-            return {}
-
-        # 创建一个新的 AIMessage 来替换,避免直接修改 state 中的对象
-        new_tool_calls = []
-        for tool_call in last_message.tool_calls:
-            if tool_call["name"] == "generate_sql":
-                logger.info("   检测到 generate_sql 调用,注入可序列化的历史消息。")
-                # 复制一份以避免修改原始 tool_call
-                modified_args = tool_call["args"].copy()
-                
-                # 将消息对象列表转换为可序列化的字典列表
-                serializable_history = []
-                for msg in state["messages"]:
-                    serializable_history.append({
-                        "type": msg.type,
-                        "content": msg.content
-                    })
-                
-                modified_args["history_messages"] = serializable_history
-                new_tool_calls.append({
-                    "name": tool_call["name"],
-                    "args": modified_args,
-                    "id": tool_call["id"],
-                })
-            else:
-                new_tool_calls.append(tool_call)
-        
-        # 用包含修改后参数的新消息替换掉原来的
-        last_message.tool_calls = new_tool_calls
-        return {"messages": [last_message]}
-
-    def _update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-        """流程建议与错误处理节点:在工具执行后更新状态。"""
-        logger.info(f"📝 [Node] update_state_after_tool - Thread: {state['thread_id']}")
-        
-        last_tool_message = state['messages'][-1]
-        tool_name = last_tool_message.name
-        tool_output = last_tool_message.content
-        next_step = None
-
-        if tool_name == 'generate_sql':
-            if "失败" in tool_output or "无法生成" in tool_output:
-                next_step = 'answer_with_common_sense'
-                logger.warning(f"   generate_sql 失败,建议下一步: {next_step}")
-            else:
-                next_step = 'valid_sql'
-                logger.info(f"   generate_sql 成功,建议下一步: {next_step}")
-        
-        elif tool_name == 'valid_sql':
-            if "失败" in tool_output:
-                next_step = 'analyze_validation_error'
-                logger.warning(f"   valid_sql 失败,建议下一步: {next_step}")
-            else:
-                next_step = 'run_sql'
-                logger.info(f"   valid_sql 成功,建议下一步: {next_step}")
-
-        elif tool_name == 'run_sql':
-            next_step = 'summarize_final_answer'
-            logger.info(f"   run_sql 执行完毕,建议下一步: {next_step}")
-
-        return {"suggested_next_step": next_step}
-
-    def _format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-        """最终输出格式化节点(当前为占位符)。"""
-        logger.info(f"🎨 [Node] format_final_response - Thread: {state['thread_id']} - 准备格式化最终输出...")
-        # 这里可以添加一个标记,表示这是格式化后的输出
-        last_message = state['messages'][-1]
-        formatted_content = f"[Formatted Output]\n{last_message.content}"
-        last_message.content = formatted_content
-        return {"messages": [last_message]}
-
-    async def chat(self, message: str, user_id: str, thread_id: Optional[str] = None) -> Dict[str, Any]:
-        """
-        处理用户聊天请求。
-        """
-        if not thread_id:
-            thread_id = f"{user_id}:{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
-            logger.info(f"🆕 新建会话,Thread ID: {thread_id}")
-
-        config = {"configurable": {"thread_id": thread_id}}
-        
-        # 定义输入
-        inputs = {
-            "messages": [HumanMessage(content=message)],
-            "user_id": user_id,
-            "thread_id": thread_id,
-            "suggested_next_step": None, # 初始化建议
-        }
-
-        final_state = None
-        try:
-            logger.info(f"🔄 开始处理 - Thread: {thread_id}, User: {user_id}, Message: '{message}'")
-            # 使用 ainvoke 来执行完整的图流程
-            final_state = await self.agent_executor.ainvoke(inputs, config)
-            
-            if final_state and final_state.get("messages"):
-                answer = final_state["messages"][-1].content
-                logger.info(f"✅ 处理完成 - Thread: {thread_id}, Final Answer: '{answer}'")
-                return {"success": True, "answer": answer, "thread_id": thread_id}
-            else:
-                 logger.error(f"❌ 处理异常结束,最终状态为空 - Thread: {thread_id}")
-                 return {"success": False, "error": "Agent failed to produce a final answer.", "thread_id": thread_id}
-
-        except Exception as e:
-            logger.error(f"❌ 处理过程中发生严重错误 - Thread: {thread_id}: {e}", exc_info=True)
-            return {"success": False, "error": str(e), "thread_id": thread_id}
-
-    async def get_conversation_history(self, thread_id: str) -> List[Dict[str, Any]]:
-        """从 checkpointer 获取指定线程的对话历史。"""
-        if not self.checkpointer:
-            return []
-        
-        config = {"configurable": {"thread_id": thread_id}}
-        conversation_state = await self.checkpointer.get(config)
-        
-        if not conversation_state:
-            return []
-            
-        history = []
-        for msg in conversation_state['values'].get('messages', []):
-            if isinstance(msg, HumanMessage):
-                role = "human"
-            elif isinstance(msg, ToolMessage):
-                role = "tool"
-            else: # AIMessage
-                role = "ai"
-            
-            history.append({
-                "type": role,
-                "content": msg.content,
-                "tool_calls": getattr(msg, 'tool_calls', None)
-            })
-        return history 

+ 0 - 1
test/custom_react_agent/__init__.py

@@ -1 +0,0 @@
-# This file makes the 'custom_react_agent' directory a Python package. 

+ 0 - 1442
test/custom_react_agent/agent.py

@@ -1,1442 +0,0 @@
-"""
-基于 StateGraph 的、具备上下文感知能力的 React Agent 核心实现
-"""
-import logging
-import json
-import pandas as pd
-import httpx
-from typing import List, Optional, Dict, Any, Tuple
-from contextlib import AsyncExitStack
-
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage, ToolMessage, BaseMessage, SystemMessage, AIMessage
-from langgraph.graph import StateGraph, END
-from langgraph.prebuilt import ToolNode
-import redis.asyncio as redis
-try:
-    from langgraph.checkpoint.redis import AsyncRedisSaver
-except ImportError:
-    AsyncRedisSaver = None
-
-# 从新模块导入配置、状态和工具
-try:
-    # 尝试相对导入(当作为模块导入时)
-    from . import config
-    from .state import AgentState
-    from .sql_tools import sql_tools
-except ImportError:
-    # 如果相对导入失败,尝试绝对导入(直接运行时)
-    import config
-    from state import AgentState
-    from sql_tools import sql_tools
-from langchain_core.runnables import RunnablePassthrough
-
-logger = logging.getLogger(__name__)
-
-class CustomReactAgent:
-    """
-    一个使用 StateGraph 构建的、具备上下文感知和持久化能力的 Agent。
-    """
-    def __init__(self):
-        """私有构造函数,请使用 create() 类方法来创建实例。"""
-        self.llm = None
-        self.tools = None
-        self.agent_executor = None
-        self.checkpointer = None
-        self._exit_stack = None
-        self.redis_client = None
-
-    @classmethod
-    async def create(cls):
-        """异步工厂方法,创建并初始化 CustomReactAgent 实例。"""
-        instance = cls()
-        await instance._async_init()
-        return instance
-
-    async def _async_init(self):
-        """异步初始化所有组件。"""
-        logger.info("🚀 开始初始化 CustomReactAgent...")
-
-        # 1. 初始化异步Redis客户端
-        self.redis_client = redis.from_url(config.REDIS_URL, decode_responses=True)
-        try:
-            await self.redis_client.ping()
-            logger.info(f"   ✅ Redis连接成功: {config.REDIS_URL}")
-        except Exception as e:
-            logger.error(f"   ❌ Redis连接失败: {e}")
-            raise
-
-        # 2. 初始化 LLM
-        self.llm = ChatOpenAI(
-            api_key=config.QWEN_API_KEY,
-            base_url=config.QWEN_BASE_URL,
-            model=config.QWEN_MODEL,
-            temperature=0.1,
-            timeout=config.NETWORK_TIMEOUT,  # 添加超时配置
-            max_retries=0,  # 禁用OpenAI客户端重试,改用Agent层统一重试
-            extra_body={
-                "enable_thinking": False,
-                "misc": {
-                    "ensure_ascii": False
-                }
-            },
-            # 新增:优化HTTP连接配置
-            http_client=httpx.Client(
-                limits=httpx.Limits(
-                    max_connections=config.HTTP_MAX_CONNECTIONS,
-                    max_keepalive_connections=config.HTTP_MAX_KEEPALIVE_CONNECTIONS,
-                    keepalive_expiry=config.HTTP_KEEPALIVE_EXPIRY,  # 30秒keep-alive过期
-                ),
-                timeout=httpx.Timeout(
-                    connect=config.HTTP_CONNECT_TIMEOUT,   # 连接超时
-                    read=config.NETWORK_TIMEOUT,           # 读取超时
-                    write=config.HTTP_CONNECT_TIMEOUT,     # 写入超时
-                    pool=config.HTTP_POOL_TIMEOUT          # 连接池超时
-                )
-            )
-        )
-        logger.info(f"   LLM 已初始化,模型: {config.QWEN_MODEL}")
-
-        # 3. 绑定工具
-        self.tools = sql_tools
-        self.llm_with_tools = self.llm.bind_tools(self.tools)
-        logger.info(f"   已绑定 {len(self.tools)} 个工具。")
-
-        # 4. 初始化 Redis Checkpointer
-        if config.REDIS_ENABLED and AsyncRedisSaver is not None:
-            try:
-                self._exit_stack = AsyncExitStack()
-                checkpointer_manager = AsyncRedisSaver.from_conn_string(config.REDIS_URL)
-                self.checkpointer = await self._exit_stack.enter_async_context(checkpointer_manager)
-                await self.checkpointer.asetup()
-                logger.info(f"   AsyncRedisSaver 持久化已启用: {config.REDIS_URL}")
-            except Exception as e:
-                logger.error(f"   ❌ RedisSaver 初始化失败: {e}", exc_info=True)
-                if self._exit_stack:
-                    await self._exit_stack.aclose()
-                self.checkpointer = None
-        else:
-            logger.warning("   Redis 持久化功能已禁用。")
-
-        # 5. 构建 StateGraph
-        self.agent_executor = self._create_graph()
-        logger.info("   StateGraph 已构建并编译。")
-        logger.info("✅ CustomReactAgent 初始化完成。")
-
-    async def close(self):
-        """清理资源,关闭 Redis 连接。"""
-        if self._exit_stack:
-            await self._exit_stack.aclose()
-            self._exit_stack = None
-            self.checkpointer = None
-            logger.info("✅ RedisSaver 资源已通过 AsyncExitStack 释放。")
-        
-        if self.redis_client:
-            await self.redis_client.aclose()
-            logger.info("✅ Redis客户端已关闭。")
-
-    def _create_graph(self):
-        """定义并编译最终的、正确的 StateGraph 结构。"""
-        builder = StateGraph(AgentState)
-
-        # 定义所有需要的节点 - 全部改为异步
-        builder.add_node("agent", self._async_agent_node)
-        builder.add_node("prepare_tool_input", self._async_prepare_tool_input_node)
-        builder.add_node("tools", ToolNode(self.tools))
-        builder.add_node("update_state_after_tool", self._async_update_state_after_tool_node)
-        builder.add_node("format_final_response", self._async_format_final_response_node)
-
-        # 建立正确的边连接
-        builder.set_entry_point("agent")
-        builder.add_conditional_edges(
-            "agent",
-            self._async_should_continue,
-            {
-                "continue": "prepare_tool_input",
-                "end": "format_final_response"
-            }
-        )
-        builder.add_edge("prepare_tool_input", "tools")
-        builder.add_edge("tools", "update_state_after_tool")
-        builder.add_edge("update_state_after_tool", "agent")
-        builder.add_edge("format_final_response", END)
-
-        return builder.compile(checkpointer=self.checkpointer)
-
-    async def _async_should_continue(self, state: AgentState) -> str:
-        """异步判断是继续调用工具还是结束。"""
-        last_message = state["messages"][-1]
-        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
-            return "continue"
-        return "end"
-
-    async def _async_agent_node(self, state: AgentState) -> Dict[str, Any]:
-        """异步Agent 节点:使用异步LLM调用。"""
-        logger.info(f"🧠 [Async Node] agent - Thread: {state['thread_id']}")
-        
-        messages_for_llm = list(state["messages"])
-        
-        # 🎯 添加数据库范围系统提示词(每次用户提问时添加)
-        if isinstance(state["messages"][-1], HumanMessage):
-            db_scope_prompt = self._get_database_scope_prompt()
-            if db_scope_prompt:
-                messages_for_llm.insert(0, SystemMessage(content=db_scope_prompt))
-                logger.info("   ✅ 已添加数据库范围判断提示词")
-        
-        # 检查是否需要分析验证错误
-        next_step = state.get("suggested_next_step")
-        
-        # 行为指令与工具建议分离
-        real_tools = {'valid_sql', 'run_sql'}
-        
-        if next_step:
-            if next_step in real_tools:
-                # 场景1: 建议调用一个真实的工具
-                instruction = f"Suggestion: Based on the previous step, please use the '{next_step}' tool to continue."
-                messages_for_llm.append(SystemMessage(content=instruction))
-                logger.info(f"   ✅ 已添加工具建议: {next_step}")
-
-            elif next_step == "analyze_validation_error":
-                # 场景2: 分析SQL验证错误(特殊指令)
-                for msg in reversed(state["messages"]):
-                    if isinstance(msg, ToolMessage) and msg.name == "valid_sql":
-                        error_guidance = self._generate_validation_error_guidance(msg.content)
-                        messages_for_llm.append(SystemMessage(content=error_guidance))
-                        logger.info("   ✅ 已添加SQL验证错误指导")
-                        break
-            
-            elif next_step == 'summarize_final_answer':
-                # 场景3: 总结最终答案(行为指令)
-                instruction = "System Instruction: The SQL query was executed successfully. Please analyze the JSON data in the last message and summarize it in natural, user-friendly language as the final answer. Do not expose the raw JSON data or SQL statements in your response."
-                messages_for_llm.append(SystemMessage(content=instruction))
-                logger.info("   ✅ 已添加 '总结答案' 行为指令")
-
-            elif next_step == 'answer_with_common_sense':
-                # 场景4: 基于常识回答(特殊指令)
-                instruction = (
-                    "无法为当前问题生成有效的SQL查询。失败原因已在上下文中提供。"
-                    "请你直接利用自身的知识库来回答用户的问题,不要再重复解释失败的原因。"
-                )
-                messages_for_llm.append(SystemMessage(content=instruction))
-                logger.info("✅ 已添加 '常识回答' 行为指令")
-
-        # 🛡️ 添加防幻觉系统提示词(重点防止参数篡改)
-        anti_hallucination_prompt = self._get_anti_hallucination_prompt(state)
-        if anti_hallucination_prompt:
-            messages_for_llm.append(SystemMessage(content=anti_hallucination_prompt))
-            logger.info("   🛡️ 已添加防幻觉系统提示词")
-
-        # 🔍 【新增】详细日志:发送给LLM的完整消息列表(按实际提交顺序)
-        logger.info("📤 发送给LLM的完整消息列表和参数:")
-        logger.info(f"   总消息数: {len(messages_for_llm)}")
-        logger.info("   消息详情:")
-        for i, msg in enumerate(messages_for_llm):
-            msg_type = type(msg).__name__
-            content = str(msg.content)
-            
-            # 对于长内容,显示前500字符并标记
-            if len(content) > 500:
-                content_display = content[:500] + f"... (内容被截断,完整长度: {len(content)}字符)"
-            else:
-                content_display = content
-                
-            logger.info(f"   [{i}] {msg_type}:")
-            # 多行显示内容,便于阅读
-            for line in content_display.split('\n'):
-                logger.info(f"      {line}")
-
-        # 添加重试机制处理网络连接问题
-        import asyncio
-        max_retries = config.MAX_RETRIES
-        for attempt in range(max_retries):
-            try:
-                # 🔍 【调试】打印LLM调用的详细信息
-                logger.info(f"🚀 准备调用LLM (尝试 {attempt + 1}/{max_retries})")
-                logger.info(f"   LLM实例: {type(self.llm_with_tools)}")
-                logger.info(f"   消息数量: {len(messages_for_llm)}")
-                
-                # 🔍 【调试】检查消息格式是否正确
-                for i, msg in enumerate(messages_for_llm):
-                    logger.info(f"   消息[{i}] 类型: {type(msg)}")
-                    logger.info(f"   消息[{i}] 有content: {hasattr(msg, 'content')}")
-                    if hasattr(msg, 'content'):
-                        logger.info(f"   消息[{i}] content类型: {type(msg.content)}")
-                        logger.info(f"   消息[{i}] content长度: {len(str(msg.content))}")
-                
-                # 使用异步调用
-                logger.info("🔄 开始调用LLM...")
-                response = await self.llm_with_tools.ainvoke(messages_for_llm)
-                logger.info("✅ LLM调用完成")
-                
-                # 🔍 【调试】详细的响应检查和日志
-                logger.info(f"   响应类型: {type(response)}")
-                logger.info(f"   响应有content: {hasattr(response, 'content')}")
-                logger.info(f"   响应有tool_calls: {hasattr(response, 'tool_calls')}")
-                logger.info(f"   LLM原始响应内容: '{response.content}'")
-                logger.info(f"   响应内容长度: {len(response.content) if response.content else 0}")
-                logger.info(f"   响应内容类型: {type(response.content)}")
-                if hasattr(response, 'tool_calls'):
-                    logger.info(f"   LLM是否有工具调用: {response.tool_calls}")
-                else:
-                    logger.info(f"   LLM是否有工具调用: 无tool_calls属性")
-
-                if hasattr(response, 'tool_calls') and response.tool_calls:
-                    logger.info(f"   工具调用数量: {len(response.tool_calls)}")
-                    for i, tool_call in enumerate(response.tool_calls):
-                        logger.info(f"   工具调用[{i}]: {tool_call.get('name', 'Unknown')}")
-
-                # 🎯 改进的响应检查和重试逻辑
-                # 检查空响应情况 - 将空响应也视为需要重试的情况
-                if not response.content and not (hasattr(response, 'tool_calls') and response.tool_calls):
-                    logger.warning("   ⚠️ LLM返回空响应且无工具调用")
-                    if attempt < max_retries - 1:
-                        # 空响应也进行重试
-                        wait_time = config.RETRY_BASE_DELAY * (2 ** attempt)
-                        logger.info(f"   🔄 空响应重试,{wait_time}秒后重试...")
-                        await asyncio.sleep(wait_time)
-                        continue
-                    else:
-                        # 所有重试都失败,返回降级回答
-                        logger.error(f"   ❌ 多次尝试仍返回空响应,返回降级回答")
-                        fallback_content = "抱歉,我现在无法正确处理您的问题。请稍后重试或重新表述您的问题。"
-                        fallback_response = AIMessage(content=fallback_content)
-                        return {"messages": [fallback_response]}
-                        
-                elif response.content and response.content.strip() == "":
-                    logger.warning("   ⚠️ LLM返回只包含空白字符的内容")
-                    if attempt < max_retries - 1:
-                        # 空白字符也进行重试
-                        wait_time = config.RETRY_BASE_DELAY * (2 ** attempt)
-                        logger.info(f"   🔄 空白字符重试,{wait_time}秒后重试...")
-                        await asyncio.sleep(wait_time)
-                        continue
-                    else:
-                        # 所有重试都失败,返回降级回答
-                        logger.error(f"   ❌ 多次尝试仍返回空白字符,返回降级回答")
-                        fallback_content = "抱歉,我现在无法正确处理您的问题。请稍后重试或重新表述您的问题。"
-                        fallback_response = AIMessage(content=fallback_content)
-                        return {"messages": [fallback_response]}
-                        
-                elif not response.content and hasattr(response, 'tool_calls') and response.tool_calls:
-                    logger.info("   ✅ LLM只返回工具调用,无文本内容(正常情况)")
-                    
-                # 🎯 最终检查:确保响应是有效的
-                if ((response.content and response.content.strip()) or 
-                    (hasattr(response, 'tool_calls') and response.tool_calls)):
-                    logger.info(f"   ✅ 异步LLM调用成功,返回有效响应")
-                    return {"messages": [response]}
-                else:
-                    # 这种情况理论上不应该发生,但作为最后的保障
-                    logger.error(f"   ❌ 意外的响应格式,进行重试")
-                    if attempt < max_retries - 1:
-                        wait_time = config.RETRY_BASE_DELAY * (2 ** attempt)
-                        logger.info(f"   🔄 意外响应格式重试,{wait_time}秒后重试...")
-                        await asyncio.sleep(wait_time)
-                        continue
-                    else:
-                        fallback_content = "抱歉,我现在无法正确处理您的问题。请稍后重试或重新表述您的问题。"
-                        fallback_response = AIMessage(content=fallback_content)
-                        return {"messages": [fallback_response]}
-                
-            except Exception as e:
-                error_msg = str(e)
-                error_type = type(e).__name__
-                logger.warning(f"   ⚠️ LLM调用失败 (尝试 {attempt + 1}/{max_retries}): {error_type}: {error_msg}")
-                
-                # 🎯 改进的错误分类逻辑:检查异常类型和错误消息
-                is_network_error = False
-                is_parameter_error = False
-                
-                # 1. 检查异常类型
-                network_exception_types = [
-                    'APIConnectionError', 'ConnectTimeout', 'ReadTimeout', 
-                    'TimeoutError', 'APITimeoutError', 'ConnectError', 
-                    'HTTPError', 'RequestException', 'ConnectionError'
-                ]
-                if error_type in network_exception_types:
-                    is_network_error = True
-                    logger.info(f"   📊 根据异常类型判断为网络错误: {error_type}")
-                
-                # 2. 检查BadRequestError中的参数错误
-                if error_type == 'BadRequestError':
-                    # 检查是否是消息格式错误
-                    if any(keyword in error_msg.lower() for keyword in [
-                        'must be followed by tool messages',
-                        'invalid_parameter_error',
-                        'assistant message with "tool_calls"',
-                        'tool_call_id',
-                        'message format'
-                    ]):
-                        is_parameter_error = True
-                        logger.info(f"   📊 根据错误消息判断为参数格式错误: {error_msg[:100]}...")
-                
-                # 3. 检查错误消息内容(不区分大小写)
-                error_msg_lower = error_msg.lower()
-                network_keywords = [
-                    'connection error', 'connect error', 'timeout', 'timed out',
-                    'network', 'connection refused', 'connection reset',
-                    'remote host', '远程主机', '网络连接', '连接超时',
-                    'request timed out', 'read timeout', 'connect timeout'
-                ]
-                
-                for keyword in network_keywords:
-                    if keyword in error_msg_lower:
-                        is_network_error = True
-                        logger.info(f"   📊 根据错误消息判断为网络错误: '{keyword}' in '{error_msg}'")
-                        break
-                
-                # 处理可重试的错误
-                if is_network_error or is_parameter_error:
-                    if attempt < max_retries - 1:
-                        # 渐进式重试间隔:3, 6, 12秒
-                        wait_time = config.RETRY_BASE_DELAY * (2 ** attempt)
-                        error_type_desc = "网络错误" if is_network_error else "参数格式错误"
-                        logger.info(f"   🔄 {error_type_desc},{wait_time}秒后重试...")
-                        
-                        # 🎯 对于参数错误,修复消息历史后重试
-                        if is_parameter_error:
-                            try:
-                                messages_for_llm = await self._handle_parameter_error_with_retry(
-                                    messages_for_llm, error_msg, attempt
-                                )
-                                logger.info(f"   🔧 消息历史修复完成,继续重试...")
-                            except Exception as fix_error:
-                                logger.error(f"   ❌ 消息历史修复失败: {fix_error}")
-                                # 修复失败,使用原始消息继续重试
-                        
-                        await asyncio.sleep(wait_time)
-                        continue
-                    else:
-                        # 所有重试都失败了,返回一个降级的回答
-                        error_type_desc = "网络连接" if is_network_error else "请求格式"
-                        logger.error(f"   ❌ {error_type_desc}持续失败,返回降级回答")
-                        
-                        # 检查是否有SQL执行结果可以利用
-                        sql_data = await self._async_extract_latest_sql_data(state["messages"])
-                        if sql_data:
-                            fallback_content = f"抱歉,由于{error_type_desc}问题,无法生成完整的文字总结。不过查询已成功执行,结果如下:\n\n" + sql_data
-                        else:
-                            fallback_content = f"抱歉,由于{error_type_desc}问题,无法完成此次请求。请稍后重试或检查网络连接。"
-                            
-                        fallback_response = AIMessage(content=fallback_content)
-                        return {"messages": [fallback_response]}
-                else:
-                    # 非网络错误,直接抛出
-                    logger.error(f"   ❌ LLM调用出现非可重试错误: {error_type}: {error_msg}")
-                    raise e
-    
-    def _print_state_info(self, state: AgentState, node_name: str) -> None:
-        """
-        打印 state 的全部信息,用于调试
-        """
-        logger.info(" ~" * 10 + " State Print Start" + " ~" * 10)
-        logger.info(f"📋 [State Debug] {node_name} - 当前状态信息:")
-        
-        # 🎯 打印 state 中的所有字段
-        logger.info("   State中的所有字段:")
-        for key, value in state.items():
-            if key == "messages":
-                logger.info(f"     {key}: {len(value)} 条消息")
-            else:
-                logger.info(f"     {key}: {value}")
-        
-        # 原有的详细消息信息
-        logger.info(f"   用户ID: {state.get('user_id', 'N/A')}")
-        logger.info(f"   线程ID: {state.get('thread_id', 'N/A')}")
-        logger.info(f"   建议下一步: {state.get('suggested_next_step', 'N/A')}")
-        
-        messages = state.get("messages", [])
-        logger.info(f"   消息历史数量: {len(messages)}")
-        
-        if messages:
-            logger.info("   最近的消息:")
-            for i, msg in enumerate(messages[-10:], start=max(0, len(messages)-10)):  # 显示最后10条消息
-                msg_type = type(msg).__name__
-                content = str(msg.content)
-                
-                # 对于长内容,使用多行显示
-                if len(content) > 200:
-                    logger.info(f"     [{i}] {msg_type}:")
-                    logger.info(f"         {content}")
-                else:
-                    logger.info(f"     [{i}] {msg_type}: {content}")
-                
-                # 如果是 AIMessage 且有工具调用,显示工具调用信息
-                if hasattr(msg, 'tool_calls') and msg.tool_calls:
-                    for tool_call in msg.tool_calls:
-                        tool_name = tool_call.get('name', 'Unknown')
-                        tool_args = tool_call.get('args', {})
-                        logger.info(f"         工具调用: {tool_name}")
-                        
-                        # 对于复杂参数,使用JSON格式化
-                        import json
-                        try:
-                            formatted_args = json.dumps(tool_args, ensure_ascii=False, indent=2)
-                            logger.info(f"         参数:")
-                            for line in formatted_args.split('\n'):
-                                logger.info(f"           {line}")
-                        except Exception:
-                            logger.info(f"         参数: {str(tool_args)}")
-        
-        logger.info(" ~" * 10 + " State Print End" + " ~" * 10)
-
-    async def _async_prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-        """
-        准备工具输入。
-        - 强制修正generate_sql的question参数,确保使用用户原始问题。
-        - 为generate_sql注入经过严格过滤的、干净的对话历史。
-        """
-        last_message = state['messages'][-1]
-        if not isinstance(last_message, AIMessage) or not last_message.tool_calls:
-            return {"messages": [last_message]}
-
-        # 强制修正LLM幻觉出的问题
-        for tool_call in last_message.tool_calls:
-            if tool_call['name'] == 'generate_sql':
-                original_user_question = next((msg.content for msg in reversed(state['messages']) if isinstance(msg, HumanMessage)), None)
-                if original_user_question and tool_call['args'].get('question') != original_user_question:
-                    logger.warning(
-                        f"修正 'generate_sql' 的问题参数。\n"
-                        f"  - LLM提供: '{tool_call['args'].get('question')}'\n"
-                        f"  + 修正为: '{original_user_question}'"
-                    )
-                    tool_call['args']['question'] = original_user_question
-
-        # 恢复原始的、更健壮的历史消息过滤和注入逻辑
-        new_tool_calls = []
-        for tool_call in last_message.tool_calls:
-            if tool_call["name"] == "generate_sql":
-                logger.info("检测到 generate_sql 调用,开始注入历史消息。")
-                modified_args = tool_call["args"].copy()
-                
-                clean_history = []
-                
-                # 找到当前用户问题,确保不包含在历史上下文中
-                current_user_question = None
-                messages_for_history = []
-                
-                # 从最后开始找到当前用户问题
-                for i in range(len(state["messages"]) - 1, -1, -1):
-                    msg = state["messages"][i]
-                    if isinstance(msg, HumanMessage):
-                        current_user_question = msg.content
-                        messages_for_history = state["messages"][:i]  # 排除当前用户问题及之后的消息
-                        break
-                
-                # 处理历史消息,确保不包含当前用户问题
-                for msg in messages_for_history:
-                    if isinstance(msg, HumanMessage):
-                        clean_history.append({"type": "human", "content": msg.content})
-                    elif isinstance(msg, AIMessage):
-                        if not msg.tool_calls and msg.content:
-                            # 注释掉 [Formatted Output] 清理逻辑 - 源头已不生成前缀
-                            # clean_content = msg.content.replace("[Formatted Output]\n", "").strip()
-                            clean_content = msg.content.strip()
-                            if clean_content:
-                                clean_history.append({"type": "ai", "content": clean_content})
-                
-                modified_args["history_messages"] = clean_history
-                logger.info(f"注入了 {len(clean_history)} 条过滤后的历史消息")
-                
-                new_tool_calls.append({
-                    "name": tool_call["name"],
-                    "args": modified_args,
-                    "id": tool_call["id"],
-                })
-            else:
-                new_tool_calls.append(tool_call)
-        
-        last_message.tool_calls = new_tool_calls
-        return {"messages": [last_message]}
-
-    def _filter_and_format_history(self, messages: list) -> list:
-        """
-        过滤和格式化历史消息,为generate_sql工具提供干净的上下文。
-        只保留历史中的用户提问和AI的最终回答。
-        """
-        clean_history = []
-        # 处理除最后一个(即当前的工具调用)之外的所有消息
-        messages_to_process = messages[:-1]
-
-        for msg in messages_to_process:
-            if isinstance(msg, HumanMessage):
-                clean_history.append({"type": "human", "content": msg.content})
-            elif isinstance(msg, AIMessage):
-                # 只保留最终的、面向用户的回答(不包含工具调用的纯文本回答)
-                if not msg.tool_calls and msg.content:
-                    # 注释掉 [Formatted Output] 清理逻辑 - 源头已不生成前缀
-                    # clean_content = msg.content.replace("[Formatted Output]\n", "").strip()
-                    clean_content = msg.content.strip()
-                    if clean_content:
-                        clean_history.append({"type": "ai", "content": clean_content})
-        
-        return clean_history
-
-    async def _async_update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-        """在工具执行后,更新 suggested_next_step 并清理参数。"""
-        logger.info(f"📝 [Node] update_state_after_tool - Thread: {state['thread_id']}")
-        
-        # 🎯 打印 state 全部信息
-        self._print_state_info(state, "update_state_after_tool")
-        
-        last_tool_message = state['messages'][-1]
-        tool_name = last_tool_message.name
-        tool_output = last_tool_message.content
-        next_step = None
-
-        if tool_name == 'generate_sql':
-            # 使用 .lower() 将输出转为小写,可以同时捕获 "failed" 和 "Failed" 等情况
-            tool_output_lower = tool_output.lower()
-            if "failed" in tool_output_lower or "无法生成" in tool_output_lower or "失败" in tool_output_lower:
-                next_step = 'answer_with_common_sense'
-            else:
-                next_step = 'valid_sql'
-        
-        elif tool_name == 'valid_sql':
-            if "失败" in tool_output:
-                next_step = 'analyze_validation_error'
-            else:
-                next_step = 'run_sql'
-
-        elif tool_name == 'run_sql':
-            next_step = 'summarize_final_answer'
-            
-        logger.info(f"   Tool '{tool_name}' executed. Suggested next step: {next_step}")
-        return {"suggested_next_step": next_step}
-
-    def _clear_history_messages_parameter(self, messages: List[BaseMessage]) -> None:
-        """
-        将 generate_sql 工具的 history_messages 参数设置为空字符串
-        """
-        for message in messages:
-            if hasattr(message, "tool_calls") and message.tool_calls:
-                for tool_call in message.tool_calls:
-                    if tool_call["name"] == "generate_sql" and "history_messages" in tool_call["args"]:
-                        tool_call["args"]["history_messages"] = ""
-                        logger.info(f"   已将 generate_sql 的 history_messages 设置为空字符串")
-
-    async def _async_format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-        """异步最终输出格式化节点。"""
-        logger.info(f"🎨 [Async Node] format_final_response - Thread: {state['thread_id']}")
-        
-        # 保持原有的消息格式化(用于shell.py兼容)
-        last_message = state['messages'][-1]
-        # 注释掉前缀添加,直接使用原始内容
-        # last_message.content = f"[Formatted Output]\n{last_message.content}"
-        
-        return {"messages": [last_message]}
-
-    async def _async_generate_api_data(self, state: AgentState) -> Dict[str, Any]:
-        """异步生成API格式的数据结构"""
-        logger.info("📊 异步生成API格式数据...")
-        
-        last_message = state['messages'][-1]
-        response_content = last_message.content
-        
-        # 注释掉 [Formatted Output] 清理逻辑 - 源头已不生成前缀
-        # if response_content.startswith("[Formatted Output]\n"):
-        #     response_content = response_content.replace("[Formatted Output]\n", "")
-        
-        api_data = {
-            "response": response_content
-        }
-
-        # --- 新增逻辑:为 answer_with_common_sense 场景拼接响应 ---
-        if state.get("suggested_next_step") == 'answer_with_common_sense':
-            failure_reason = self._find_generate_sql_failure_reason(state['messages'])
-            if failure_reason:
-                # 将 "Database query failed. Reason: " 前缀移除,使其更自然
-                cleaned_reason = failure_reason.replace("Database query failed. Reason:", "").strip()
-                # 拼接失败原因和LLM的常识回答
-                api_data["response"] = f"{cleaned_reason}\n\n{response_content}"
-                logger.info("   ✅ 已成功拼接 '失败原因' 和 '常识回答'")
-        
-        sql_info = await self._async_extract_sql_and_data(state['messages'])
-        if sql_info['sql']:
-            api_data["sql"] = sql_info['sql']
-        if sql_info['records']:
-            api_data["records"] = sql_info['records']
-        
-        # 生成Agent元数据
-        api_data["react_agent_meta"] = await self._async_collect_agent_metadata(state)
-        
-        logger.info(f"   API数据生成完成,包含字段: {list(api_data.keys())}")
-        return api_data
-
-    def _find_generate_sql_failure_reason(self, messages: List[BaseMessage]) -> Optional[str]:
-        """从后向前查找最近一次generate_sql失败的原因"""
-        for msg in reversed(messages):
-            if isinstance(msg, ToolMessage) and msg.name == 'generate_sql':
-                # 找到最近的generate_sql工具消息
-                if "failed" in msg.content.lower() or "失败" in msg.content.lower():
-                    return msg.content
-                else:
-                    # 如果是成功的消息,说明当前轮次没有失败,停止查找
-                    return None
-        return None
-
-    async def _async_extract_sql_and_data(self, messages: List[BaseMessage]) -> Dict[str, Any]:
-        """异步从消息历史中提取SQL和数据记录"""
-        result = {"sql": None, "records": None}
-        
-        # 查找最后一个HumanMessage之后的工具执行结果
-        last_human_index = -1
-        for i in range(len(messages) - 1, -1, -1):
-            if isinstance(messages[i], HumanMessage):
-                last_human_index = i
-                break
-        
-        if last_human_index == -1:
-            return result
-        
-        # 在当前对话轮次中查找工具执行结果
-        current_conversation = messages[last_human_index:]
-        
-        sql_query = None
-        sql_data = None
-        
-        for msg in current_conversation:
-            if isinstance(msg, ToolMessage):
-                if msg.name == 'generate_sql':
-                    # 提取生成的SQL
-                    content = msg.content
-                    if content and not any(keyword in content for keyword in ["失败", "无法生成", "Database query failed"]):
-                        sql_query = content.strip()
-                        
-                elif msg.name == 'run_sql':
-                    # 提取SQL执行结果
-                    try:
-                        import json
-                        parsed_data = json.loads(msg.content)
-                        if isinstance(parsed_data, list) and len(parsed_data) > 0:
-                            # DataFrame.to_json(orient='records') 格式
-                            columns = list(parsed_data[0].keys()) if parsed_data else []
-                            sql_data = {
-                                "columns": columns,
-                                "rows": parsed_data,
-                                "total_row_count": len(parsed_data),
-                                "is_limited": False  # 当前版本没有实现限制
-                            }
-                    except (json.JSONDecodeError, Exception) as e:
-                        logger.warning(f"   解析SQL结果失败: {e}")
-        
-        if sql_query:
-            result["sql"] = sql_query
-        if sql_data:
-            result["records"] = sql_data
-            
-        return result
-
-    async def _async_collect_agent_metadata(self, state: AgentState) -> Dict[str, Any]:
-        """收集Agent元数据"""
-        messages = state['messages']
-        
-        # 统计工具使用情况
-        tools_used = []
-        sql_execution_count = 0
-        context_injected = False
-        
-        # 计算对话轮次(HumanMessage的数量)
-        conversation_rounds = sum(1 for msg in messages if isinstance(msg, HumanMessage))
-        
-        # 分析工具调用和执行
-        for msg in messages:
-            if isinstance(msg, ToolMessage):
-                if msg.name not in tools_used:
-                    tools_used.append(msg.name)
-                if msg.name == 'run_sql':
-                    sql_execution_count += 1
-            elif isinstance(msg, AIMessage) and hasattr(msg, 'tool_calls') and msg.tool_calls:
-                for tool_call in msg.tool_calls:
-                    tool_name = tool_call.get('name')
-                    if tool_name and tool_name not in tools_used:
-                        tools_used.append(tool_name)
-                    
-                    # 检查是否注入了历史上下文
-                    if (tool_name == 'generate_sql' and 
-                        tool_call.get('args', {}).get('history_messages')):
-                        context_injected = True
-        
-        # 构建执行路径(简化版本)
-        execution_path = ["agent"]
-        if tools_used:
-            execution_path.extend(["prepare_tool_input", "tools"])
-        execution_path.append("format_final_response")
-        
-        return {
-            "thread_id": state['thread_id'],
-            "conversation_rounds": conversation_rounds,
-            "tools_used": tools_used,
-            "execution_path": execution_path,
-            "total_messages": len(messages),
-            "sql_execution_count": sql_execution_count,
-            "context_injected": context_injected,
-            "agent_version": "custom_react_v1"
-        }
-
-    async def _async_extract_latest_sql_data(self, messages: List[BaseMessage]) -> Optional[str]:
-        """从消息历史中提取最近的run_sql执行结果,但仅限于当前对话轮次。"""
-        logger.info("🔍 提取最新的SQL执行结果...")
-        
-        # 🎯 只查找最后一个HumanMessage之后的SQL执行结果
-        last_human_index = -1
-        for i in range(len(messages) - 1, -1, -1):
-            if isinstance(messages[i], HumanMessage):
-                last_human_index = i
-                break
-        
-        if last_human_index == -1:
-            logger.info("   未找到用户消息,跳过SQL数据提取")
-            return None
-        
-        # 只在当前对话轮次中查找SQL结果
-        current_conversation = messages[last_human_index:]
-        logger.info(f"   当前对话轮次包含 {len(current_conversation)} 条消息")
-        
-        for msg in reversed(current_conversation):
-            if isinstance(msg, ToolMessage) and msg.name == 'run_sql':
-                logger.info(f"   找到当前对话轮次的run_sql结果: {msg.content[:100]}...")
-                
-                # 🎯 处理Unicode转义序列,将其转换为正常的中文字符
-                try:
-                    # 先尝试解析JSON以验证格式
-                    parsed_data = json.loads(msg.content)
-                    # 重新序列化,确保中文字符正常显示
-                    formatted_content = json.dumps(parsed_data, ensure_ascii=False, separators=(',', ':'))
-                    logger.info(f"   已转换Unicode转义序列为中文字符")
-                    return formatted_content
-                except json.JSONDecodeError:
-                    # 如果不是有效JSON,直接返回原内容
-                    logger.warning(f"   SQL结果不是有效JSON格式,返回原始内容")
-                    return msg.content
-        
-        logger.info("   当前对话轮次中未找到run_sql执行结果")
-        return None
-
-    async def chat(self, message: str, user_id: str, thread_id: Optional[str] = None) -> Dict[str, Any]:
-        """
-        处理用户聊天请求。
-        """
-        if not thread_id:
-            now = pd.Timestamp.now()
-            milliseconds = int(now.microsecond / 1000)
-            thread_id = f"{user_id}:{now.strftime('%Y%m%d%H%M%S')}{milliseconds:03d}"
-            logger.info(f"🆕 新建会话,Thread ID: {thread_id}")
-        
-        config = {
-            "configurable": {
-                "thread_id": thread_id,
-            }
-        }
-        
-        inputs = {
-            "messages": [HumanMessage(content=message)],
-            "user_id": user_id,
-            "thread_id": thread_id,
-            "suggested_next_step": None,
-        }
-
-        try:
-            logger.info(f"🚀 开始处理用户消息: {message[:50]}...")
-            
-            final_state = await self.agent_executor.ainvoke(inputs, config)
-            
-            # 🔍 调试:打印 final_state 的所有 keys
-            logger.info(f"🔍 Final state keys: {list(final_state.keys())}")
-            
-            answer = final_state["messages"][-1].content
-            
-            # 🎯 提取最近的 run_sql 执行结果(不修改messages)
-            sql_data = await self._async_extract_latest_sql_data(final_state["messages"])
-            
-            logger.info(f"✅ 处理完成 - Final Answer: '{answer}'")
-            
-            # 构建返回结果(保持简化格式用于shell.py)
-            result = {
-                "success": True, 
-                "answer": answer, 
-                "thread_id": thread_id
-            }
-            
-            # 只有当存在SQL数据时才添加到返回结果中
-            if sql_data:
-                result["sql_data"] = sql_data
-                logger.info("   📊 已包含SQL原始数据")
-            
-            # 生成API格式数据
-            api_data = await self._async_generate_api_data(final_state)
-            result["api_data"] = api_data
-            logger.info("   🔌 已生成API格式数据")
-            
-            return result
-            
-        except Exception as e:
-            logger.error(f"❌ 处理过程中发生严重错误 - Thread: {thread_id}: {e}", exc_info=True)
-            return {"success": False, "error": str(e), "thread_id": thread_id}
-    
-    async def get_conversation_history(self, thread_id: str) -> List[Dict[str, Any]]:
-        """从 checkpointer 获取指定线程的对话历史。"""
-        if not self.checkpointer:
-            return []
-        
-        config = {"configurable": {"thread_id": thread_id}}
-        try:
-            conversation_state = await self.checkpointer.aget(config)
-        except RuntimeError as e:
-            if "Event loop is closed" in str(e):
-                logger.warning(f"⚠️ Event loop已关闭,尝试重新获取对话历史: {thread_id}")
-                # 如果事件循环关闭,返回空结果而不是抛出异常
-                return []
-            else:
-                raise
-        
-        if not conversation_state:
-            return []
-            
-        history = []
-        messages = conversation_state.get('channel_values', {}).get('messages', [])
-        for msg in messages:
-            if isinstance(msg, HumanMessage):
-                role = "human"
-            elif isinstance(msg, ToolMessage):
-                role = "tool"
-            else: # AIMessage
-                role = "ai"
-            
-            history.append({
-                "type": role,
-                "content": msg.content,
-                "tool_calls": getattr(msg, 'tool_calls', None)
-            })
-        return history 
-
-    async def get_user_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
-        """
-        获取指定用户的最近聊天记录列表
-        利用thread_id格式 'user_id:timestamp' 来查询
-        """
-        if not self.checkpointer:
-            return []
-        
-        try:
-            # 使用统一的异步Redis客户端
-            redis_client = self.redis_client
-            
-            # 1. 扫描匹配该用户的所有checkpoint keys
-            # checkpointer的key格式通常是: checkpoint:thread_id:checkpoint_id
-            pattern = f"checkpoint:{user_id}:*"
-            logger.info(f"🔍 扫描模式: {pattern}")
-            
-            user_threads = {}
-            cursor = 0
-            
-            while True:
-                cursor, keys = await redis_client.scan(
-                    cursor=cursor,
-                    match=pattern,
-                    count=1000
-                )
-                
-
-                
-                for key in keys:
-                    try:
-                        # 解析key获取thread_id和checkpoint信息
-                        # key格式: checkpoint:user_id:timestamp:status:checkpoint_id
-                        key_str = key.decode() if isinstance(key, bytes) else key
-                        parts = key_str.split(':')
-                        
-                        if len(parts) >= 4:
-                            # thread_id = user_id:timestamp
-                            thread_id = f"{parts[1]}:{parts[2]}"
-                            timestamp = parts[2]
-                            
-                            # 跟踪每个thread的最新checkpoint
-                            if thread_id not in user_threads:
-                                user_threads[thread_id] = {
-                                    "thread_id": thread_id,
-                                    "timestamp": timestamp,
-                                    "latest_key": key_str
-                                }
-                            else:
-                                # 保留最新的checkpoint key(通常checkpoint_id越大越新)
-                                if len(parts) > 4 and parts[4] > user_threads[thread_id]["latest_key"].split(':')[4]:
-                                    user_threads[thread_id]["latest_key"] = key_str
-                                    
-                    except Exception as e:
-                        logger.warning(f"解析key {key} 失败: {e}")
-                        continue
-                
-                if cursor == 0:
-                    break
-            
-            # 注意:redis_client 是共享的 self.redis_client,不在此处关闭,
-            # 以免影响后续请求;连接统一由 Agent.close() 清理
-            
-            # 2. 按时间戳排序(新的在前)
-            sorted_threads = sorted(
-                user_threads.values(),
-                key=lambda x: x["timestamp"],
-                reverse=True
-            )[:limit]
-            
-            # 3. 获取每个thread的详细信息
-            conversations = []
-            for thread_info in sorted_threads:
-                try:
-                    thread_id = thread_info["thread_id"]
-                    thread_config = {"configurable": {"thread_id": thread_id}}
-                    
-                    try:
-                        state = await self.checkpointer.aget(thread_config)
-                    except RuntimeError as e:
-                        if "Event loop is closed" in str(e):
-                            logger.warning(f"⚠️ Event loop已关闭,跳过thread: {thread_id}")
-                            continue
-                        else:
-                            raise
-                    
-                    if state and state.get('channel_values', {}).get('messages'):
-                        messages = state['channel_values']['messages']
-                        
-                        # 生成对话预览
-                        preview = self._generate_conversation_preview(messages)
-                        
-                        # 获取最后一条用户消息
-                        last_human_message = None
-                        if messages:
-                            for msg in reversed(messages):
-                                if isinstance(msg, HumanMessage):
-                                    last_human_message = msg.content
-                                    break
-                        
-                        conversations.append({
-                            "conversation_id": thread_id,
-                            "thread_id": thread_id,
-                            "user_id": user_id,
-                            "message_count": len(messages),
-                            "last_message": last_human_message,
-                            "updated_at": self._format_utc_to_china_time(state.get('ts')) if state.get('ts') else None,
-                            "conversation_title": preview,
-                            "created_at": self._format_timestamp(thread_info["timestamp"])
-                        })
-                        
-                except Exception as e:
-                    logger.error(f"获取thread {thread_info['thread_id']} 详情失败: {e}")
-                    continue
-            
-            logger.info(f"✅ 找到用户 {user_id} 的 {len(conversations)} 个对话")
-            return conversations
-            
-        except Exception as e:
-            logger.error(f"❌ 获取用户 {user_id} 对话列表失败: {e}")
-            return []
-
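For reference, the key-parsing step above as a minimal standalone sketch (assuming the key layout checkpoint:<user_id>:<timestamp>:<status>:<checkpoint_id> described in the comments; the sample keys are made up):

def group_checkpoint_keys(keys):
    """Group scanned checkpoint keys by thread_id = "<user_id>:<timestamp>"."""
    threads = {}
    for key in keys:
        parts = key.split(':')
        if len(parts) < 4:
            continue  # not a checkpoint key in the expected layout
        thread_id = f"{parts[1]}:{parts[2]}"
        threads.setdefault(thread_id, []).append(key)
    return threads

sample = [
    "checkpoint:wang:20250710123137984:done:0001",
    "checkpoint:wang:20250710123137984:done:0002",
    "checkpoint:wang:20250711080000123:done:0001",
]
print(group_checkpoint_keys(sample))  # two threads for user "wang"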
-    def _generate_conversation_preview(self, messages: List[BaseMessage]) -> str:
-        """生成对话预览"""
-        if not messages:
-            return "空对话"
-        
-        # 获取第一个用户消息作为预览
-        for msg in messages:
-            if isinstance(msg, HumanMessage):
-                content = str(msg.content)
-                return content[:50] + "..." if len(content) > 50 else content
-        
-        return "系统消息"
-
-    def _format_timestamp(self, timestamp: str) -> str:
-        """格式化时间戳为可读格式,包含毫秒"""
-        try:
-            # timestamp格式: 20250710123137984
-            if len(timestamp) >= 14:
-                year = timestamp[:4]
-                month = timestamp[4:6]
-                day = timestamp[6:8]
-                hour = timestamp[8:10]
-                minute = timestamp[10:12]
-                second = timestamp[12:14]
-                # 提取毫秒部分(如果存在)
-                millisecond = timestamp[14:17] if len(timestamp) > 14 else "000"
-                return f"{year}-{month}-{day} {hour}:{minute}:{second}.{millisecond}"
-        except Exception:
-            pass
-        return timestamp
-    
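A standalone check of the compact timestamp layout assumed here (YYYYMMDDHHMMSS plus optional milliseconds); illustrative only:

def format_compact_timestamp(ts: str) -> str:
    # "20250710123137984" -> "2025-07-10 12:31:37.984"
    if len(ts) < 14:
        return ts
    ms = ts[14:17] if len(ts) > 14 else "000"
    return f"{ts[:4]}-{ts[4:6]}-{ts[6:8]} {ts[8:10]}:{ts[10:12]}:{ts[12:14]}.{ms}"

assert format_compact_timestamp("20250710123137984") == "2025-07-10 12:31:37.984"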
-    def _format_utc_to_china_time(self, utc_time_str: str) -> str:
-        """将UTC时间转换为中国时区时间格式"""
-        try:
-            from datetime import datetime, timezone, timedelta
-            
-            # 解析UTC时间字符串
-            # 格式: "2025-07-17T13:21:52.868292+00:00"
-            dt = datetime.fromisoformat(utc_time_str.replace('Z', '+00:00'))
-            
-            # 转换为中国时区 (UTC+8)
-            china_tz = timezone(timedelta(hours=8))
-            china_time = dt.astimezone(china_tz)
-            
-            # 格式化为目标格式: "2025-07-17 21:12:02.456"
-            return china_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]  # 只保留3位毫秒
-        except Exception as e:
-            logger.warning(f"时间格式转换失败: {e}")
-            return utc_time_str 
-
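The same conversion as a self-contained snippet (ISO UTC string to UTC+8, keeping three millisecond digits); a sketch, not the original helper:

from datetime import datetime, timezone, timedelta

def utc_iso_to_china(utc_iso: str) -> str:
    dt = datetime.fromisoformat(utc_iso.replace('Z', '+00:00'))
    china_time = dt.astimezone(timezone(timedelta(hours=8)))
    return china_time.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]

print(utc_iso_to_china("2025-07-17T13:21:52.868292+00:00"))  # 2025-07-17 21:21:52.868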
-    def _get_database_scope_prompt(self) -> str:
-        """Get database scope prompt for intelligent query decision making"""
-        try:
-            import os
-            # Read agent/tools/db_query_decision_prompt.txt
-            project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-            db_scope_file = os.path.join(project_root, "agent", "tools", "db_query_decision_prompt.txt")
-            
-            with open(db_scope_file, 'r', encoding='utf-8') as f:
-                db_scope_content = f.read().strip()
-            
-            prompt = f"""You are an intelligent database query assistant. When deciding whether to use database query tools, please follow these rules:
-
-=== DATABASE BUSINESS SCOPE ===
-{db_scope_content}
-
-=== DECISION RULES ===
-1. If the question involves data within the above business scope (service areas, branches, revenue, traffic flow, etc.), use the generate_sql tool
-2. If the question is about general knowledge (like "when do lychees ripen?", weather, historical events, etc.), answer directly based on your knowledge WITHOUT using database tools
-3. When answering general knowledge questions, provide clear and helpful answers without any special prefixes
-
-=== FALLBACK STRATEGY ===
-When generate_sql returns an error message or when queries return no results:
-1. First, check if the question is within the database scope described above
-2. For questions clearly OUTSIDE the database scope (world events, general knowledge, etc.):
-   - Provide the answer based on your knowledge immediately
-   - Give a direct, natural answer without any prefixes or disclaimers
-3. For questions within database scope but queries return no results:
-   - If it's a reasonable question that might have a general answer, provide it naturally
-4. For questions that definitely require specific database data:
-   - Acknowledge the limitation and suggest the data may not be available
-   - Do not attempt to guess or fabricate specific data
-
-Please intelligently choose whether to query the database based on the nature of the user's question,
-and do not explain your decision-making process.
-"""
-            
-            return prompt
-            
-        except Exception as e:
-            logger.warning(f"⚠️ Unable to read database scope description file: {e}")
-            return ""
-
-    def _generate_validation_error_guidance(self, validation_error: str) -> str:
-        """根据验证错误类型生成具体的修复指导"""
-        
-        # 优先处理最常见的语法错误
-        if "语法错误" in validation_error or "syntax error" in validation_error.lower():
-            return """SQL验证失败:语法错误。
-处理建议:
-1. 仔细检查SQL语法(括号、引号、关键词等)
-2. 修复语法错误后,调用 valid_sql 工具重新验证
-3. 常见问题:缺少逗号、括号不匹配、关键词拼写错误"""
-
-        # 新增的合并条件,处理所有“不存在”类型的错误
-        elif ("不存在" in validation_error or 
-              "no such table" in validation_error.lower() or
-              "does not exist" in validation_error.lower()):
-            return """SQL验证失败:表或字段不存在。
-处理建议:
-1. 请明确告知用户,因数据库缺少相应的表或字段,无法通过SQL查询获取准确答案。
-2. 请基于你的通用知识和常识,直接回答用户的问题或提供相关解释。
-3. 请不要再尝试生成或修复SQL。"""
-
-        # 其他原有分支可以被新逻辑覆盖,故移除
-        # Fallback 到通用的错误处理
-        else:
-            return f"""SQL验证失败:{validation_error}
-处理建议:
-1. 如果这是一个可以修复的错误,请尝试修正并再次验证。
-2. 如果错误表明数据缺失,请直接向用户说明情况。
-3. 避免猜测或编造数据库中不存在的信息。"""
-
-    # === 参数错误诊断和修复函数 ===
-    
-    def _diagnose_parameter_error(self, messages: List[BaseMessage], error_msg: str) -> Dict[str, Any]:
-        """
-        诊断参数错误的详细原因
-        """
-        logger.error("🔍 开始诊断参数错误...")
-        logger.error(f"   错误消息: {error_msg}")
-        
-        diagnosis = {
-            "error_type": "parameter_error",
-            "incomplete_tool_calls": [],
-            "orphaned_tool_messages": [],
-            "total_messages": len(messages),
-            "recommended_action": None
-        }
-        
-        # 分析消息历史
-        logger.error("📋 消息历史分析:")
-        for i, msg in enumerate(messages):
-            msg_type = type(msg).__name__
-            
-            if isinstance(msg, AIMessage):
-                has_tool_calls = hasattr(msg, 'tool_calls') and msg.tool_calls
-                content_summary = f"'{msg.content[:50]}...'" if msg.content else "空内容"
-                
-                logger.error(f"   [{i}] {msg_type}: {content_summary}")
-                
-                if has_tool_calls:
-                    logger.error(f"       工具调用: {len(msg.tool_calls)} 个")
-                    for j, tc in enumerate(msg.tool_calls):
-                        tool_name = tc.get('name', 'Unknown')
-                        tool_id = tc.get('id', 'Unknown')
-                        logger.error(f"         [{j}] {tool_name} (ID: {tool_id})")
-                        
-                        # 查找对应的ToolMessage
-                        found_response = False
-                        for k in range(i + 1, len(messages)):
-                            if (isinstance(messages[k], ToolMessage) and 
-                                messages[k].tool_call_id == tool_id):
-                                found_response = True
-                                break
-                            elif isinstance(messages[k], (HumanMessage, AIMessage)):
-                                # 遇到新的对话轮次,停止查找
-                                break
-                        
-                        if not found_response:
-                            diagnosis["incomplete_tool_calls"].append({
-                                "message_index": i,
-                                "tool_name": tool_name,
-                                "tool_id": tool_id,
-                                "ai_message_content": msg.content
-                            })
-                            logger.error(f"         ❌ 未找到对应的ToolMessage!")
-                        else:
-                            logger.error(f"         ✅ 找到对应的ToolMessage")
-            
-            elif isinstance(msg, ToolMessage):
-                logger.error(f"   [{i}] {msg_type}: {msg.name} (ID: {msg.tool_call_id})")
-                
-                # 检查是否有对应的AIMessage
-                found_ai_message = False
-                for k in range(i - 1, -1, -1):
-                    if (isinstance(messages[k], AIMessage) and 
-                        hasattr(messages[k], 'tool_calls') and 
-                        messages[k].tool_calls):
-                        if any(tc.get('id') == msg.tool_call_id for tc in messages[k].tool_calls):
-                            found_ai_message = True
-                            break
-                    elif isinstance(messages[k], HumanMessage):
-                        break
-                
-                if not found_ai_message:
-                    diagnosis["orphaned_tool_messages"].append({
-                        "message_index": i,
-                        "tool_name": msg.name,
-                        "tool_call_id": msg.tool_call_id
-                    })
-                    logger.error(f"       ❌ 未找到对应的AIMessage!")
-            
-            elif isinstance(msg, HumanMessage):
-                logger.error(f"   [{i}] {msg_type}: '{msg.content[:50]}...'")
-        
-        # 生成修复建议
-        if diagnosis["incomplete_tool_calls"]:
-            logger.error(f"🔧 发现 {len(diagnosis['incomplete_tool_calls'])} 个不完整的工具调用")
-            diagnosis["recommended_action"] = "fix_incomplete_tool_calls"
-        elif diagnosis["orphaned_tool_messages"]:
-            logger.error(f"🔧 发现 {len(diagnosis['orphaned_tool_messages'])} 个孤立的工具消息")
-            diagnosis["recommended_action"] = "remove_orphaned_tool_messages"
-        else:
-            logger.error("🔧 未发现明显的消息格式问题")
-            diagnosis["recommended_action"] = "unknown"
-        
-        return diagnosis
-
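The pairing rule this diagnosis checks (every tool_call id on an AI message needs a matching tool response before the next turn) can be sketched with plain dicts standing in for the LangChain message classes:

def find_unanswered_tool_calls(messages):
    """messages: simplified stand-in dicts, e.g. {"role": "ai", "tool_calls": [{"id": ...}]},
    {"role": "tool", "tool_call_id": ...}, {"role": "human", ...}."""
    unanswered = []
    for i, msg in enumerate(messages):
        if msg.get("role") != "ai":
            continue
        for call in msg.get("tool_calls") or []:
            answered = False
            for later in messages[i + 1:]:
                if later.get("role") == "tool" and later.get("tool_call_id") == call["id"]:
                    answered = True
                    break
                if later.get("role") in ("human", "ai"):
                    break  # next turn started without a tool response
            if not answered:
                unanswered.append(call["id"])
    return unanswered

msgs = [
    {"role": "human", "content": "查询车流量"},
    {"role": "ai", "tool_calls": [{"id": "call_1", "name": "generate_sql"}]},
    # no tool message for call_1 -> incomplete
]
print(find_unanswered_tool_calls(msgs))  # ['call_1']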
-    def _fix_by_adding_missing_tool_messages(self, messages: List[BaseMessage], diagnosis: Dict) -> List[BaseMessage]:
-        """
-        通过添加缺失的ToolMessage来修复消息历史
-        """
-        logger.info("🔧 策略1: 补充缺失的ToolMessage")
-        
-        fixed_messages = list(messages)
-        
-        for incomplete in diagnosis["incomplete_tool_calls"]:
-            # 为缺失的工具调用添加错误响应
-            error_tool_message = ToolMessage(
-                content="工具调用已超时或失败,请重新尝试。",
-                tool_call_id=incomplete["tool_id"],
-                name=incomplete["tool_name"]
-            )
-            
-            # 插入到合适的位置
-            insert_index = incomplete["message_index"] + 1
-            fixed_messages.insert(insert_index, error_tool_message)
-            
-            logger.info(f"   ✅ 为工具调用 {incomplete['tool_name']}({incomplete['tool_id']}) 添加错误响应")
-        
-        return fixed_messages
-
-    def _fix_by_removing_incomplete_tool_calls(self, messages: List[BaseMessage], diagnosis: Dict) -> List[BaseMessage]:
-        """
-        通过删除不完整的工具调用来修复消息历史
-        """
-        logger.info("🔧 策略2: 删除不完整的工具调用")
-        
-        fixed_messages = []
-        
-        for i, msg in enumerate(messages):
-            if isinstance(msg, AIMessage) and hasattr(msg, 'tool_calls') and msg.tool_calls:
-                # 检查这个消息是否有不完整的工具调用
-                has_incomplete = any(
-                    inc["message_index"] == i 
-                    for inc in diagnosis["incomplete_tool_calls"]
-                )
-                
-                if has_incomplete:
-                    # 如果有文本内容,保留文本内容但删除工具调用
-                    if msg.content and msg.content.strip():
-                        logger.info(f"   🔧 保留文本内容,删除工具调用: '{msg.content[:50]}...'")
-                        fixed_msg = AIMessage(content=msg.content)
-                        fixed_messages.append(fixed_msg)
-                    else:
-                        # 如果没有文本内容,创建一个说明性的消息
-                        logger.info(f"   🔧 创建说明性消息替换空的工具调用")
-                        fixed_msg = AIMessage(content="我需要重新分析这个问题。")
-                        fixed_messages.append(fixed_msg)
-                else:
-                    fixed_messages.append(msg)
-            else:
-                fixed_messages.append(msg)
-        
-        return fixed_messages
-
-    def _fix_by_rebuilding_history(self, messages: List[BaseMessage]) -> List[BaseMessage]:
-        """
-        重建消息历史,只保留完整的对话轮次
-        """
-        logger.info("🔧 策略3: 重建消息历史")
-        
-        clean_messages = []
-        current_conversation = []
-        
-        for msg in messages:
-            if isinstance(msg, HumanMessage):
-                # 新的对话轮次开始
-                if current_conversation:
-                    # 检查上一轮对话是否完整
-                    if self._is_conversation_complete(current_conversation):
-                        clean_messages.extend(current_conversation)
-                        logger.info(f"   ✅ 保留完整的对话轮次 ({len(current_conversation)} 条消息)")
-                    else:
-                        logger.info(f"   ❌ 跳过不完整的对话轮次 ({len(current_conversation)} 条消息)")
-                
-                current_conversation = [msg]
-            else:
-                current_conversation.append(msg)
-        
-        # 处理最后一轮对话
-        if current_conversation:
-            if self._is_conversation_complete(current_conversation):
-                clean_messages.extend(current_conversation)
-            else:
-                # 最后一轮对话不完整,只保留用户消息
-                clean_messages.extend([msg for msg in current_conversation if isinstance(msg, HumanMessage)])
-        
-        logger.info(f"   📊 重建完成: {len(messages)} -> {len(clean_messages)} 条消息")
-        return clean_messages
-
-    def _is_conversation_complete(self, conversation: List[BaseMessage]) -> bool:
-        """
-        检查对话轮次是否完整
-        """
-        for msg in conversation:
-            if (isinstance(msg, AIMessage) and 
-                hasattr(msg, 'tool_calls') and 
-                msg.tool_calls):
-                # 检查是否有对应的ToolMessage
-                tool_call_ids = [tc.get('id') for tc in msg.tool_calls]
-                found_responses = sum(
-                    1 for m in conversation
-                    if isinstance(m, ToolMessage) and m.tool_call_id in tool_call_ids
-                )
-                if found_responses < len(tool_call_ids):
-                    return False
-        return True
-
-    async def _handle_parameter_error_with_retry(self, messages: List[BaseMessage], error_msg: str, attempt: int) -> List[BaseMessage]:
-        """
-        处理参数错误的完整流程
-        """
-        logger.error(f"🔧 处理参数错误 (重试 {attempt + 1}/3)")
-        
-        # 1. 诊断问题
-        diagnosis = self._diagnose_parameter_error(messages, error_msg)
-        
-        # 2. 根据重试次数选择修复策略
-        if attempt == 0:
-            # 第一次重试:补充缺失的ToolMessage
-            fixed_messages = self._fix_by_adding_missing_tool_messages(messages, diagnosis)
-        elif attempt == 1:
-            # 第二次重试:删除不完整的工具调用
-            fixed_messages = self._fix_by_removing_incomplete_tool_calls(messages, diagnosis)
-        else:
-            # 第三次重试:重建消息历史
-            fixed_messages = self._fix_by_rebuilding_history(messages)
-        
-        logger.info(f"🔧 修复完成: {len(messages)} -> {len(fixed_messages)} 条消息")
-        return fixed_messages
-
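The three repair strategies above form an escalation ladder keyed by the retry attempt; the selection logic, as a standalone sketch:

def pick_repair_strategy(attempt: int) -> str:
    # attempt 0: fill in missing ToolMessages; 1: drop dangling tool calls; 2+: rebuild history
    ladder = ["add_missing_tool_messages", "drop_incomplete_tool_calls", "rebuild_history"]
    return ladder[min(attempt, len(ladder) - 1)]

assert pick_repair_strategy(0) == "add_missing_tool_messages"
assert pick_repair_strategy(5) == "rebuild_history"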
-    def _generate_contextual_fallback(self, messages: List[BaseMessage], diagnosis: Dict) -> str:
-        """
-        基于上下文生成合理的回答
-        """
-        # 分析用户的最新问题
-        last_human_message = None
-        for msg in reversed(messages):
-            if isinstance(msg, HumanMessage):
-                last_human_message = msg
-                break
-        
-        if not last_human_message:
-            return "抱歉,我无法理解您的问题。"
-        
-        # 分析是否是数据库相关问题
-        question = last_human_message.content.lower()
-        if any(keyword in question for keyword in ['查询', '数据', '服务区', '收入', '车流量']):
-            return f"抱歉,在处理您关于「{last_human_message.content}」的查询时遇到了技术问题。请稍后重试,或者重新描述您的问题。"
-        else:
-            return "抱歉,我现在无法正确处理您的问题。请稍后重试或重新表述您的问题。"
-
-    def _get_anti_hallucination_prompt(self, state: AgentState) -> str:
-        """
-        生成防幻觉提示词,专注于保持参数原样传递
-        """
-        # 获取当前用户的最新问题
-        last_user_message = None
-        for msg in reversed(state["messages"]):
-            if isinstance(msg, HumanMessage):
-                last_user_message = msg.content
-                break
-        
-        if not last_user_message:
-            return ""
-        
-        prompt = f"""🛡️ 关键指令:工具调用参数必须完全准确
-
-用户当前问题:「{last_user_message}」
-
-调用工具时的严格要求:
-1. **原样传递原则**:question 参数必须与用户问题完全一致,一字不差
-2. **禁止任何改写**:不得进行同义词替换、语言优化或任何形式的修改
-3. **保持专有名词**:所有人名、地名、专业术语必须保持原始表达
-
-❌ 错误示例:
-- 用户问"充电桩",不得改为"充电栋"
-- 用户提到"南城服务区",不得改为"南峡服务区"
-
-✅ 正确做法:
-- 完全复制用户的原始问题作为question参数
-- 保持所有词汇的原始形态
-
-请严格遵守此要求,确保工具调用的准确性。"""
-        
-        return prompt

+ 0 - 976
test/custom_react_agent/api.py

@@ -1,976 +0,0 @@
-"""
-Custom React Agent API 服务
-提供RESTful接口用于智能问答
-"""
-import asyncio
-import logging
-import atexit
-import os
-import sys
-from datetime import datetime
-from typing import Optional, Dict, Any
-
-# 🔧 修复模块路径问题:添加项目根目录到 sys.path
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-PROJECT_ROOT = os.path.abspath(os.path.join(CURRENT_DIR, '..', '..'))
-sys.path.insert(0, CURRENT_DIR)  # 当前目录优先
-sys.path.insert(1, PROJECT_ROOT)  # 项目根目录
-
-from flask import Flask, request, jsonify
-import redis.asyncio as redis
-
-try:
-    # 尝试相对导入(当作为模块导入时)
-    from .agent import CustomReactAgent
-except ImportError:
-    # 如果相对导入失败,尝试绝对导入(直接运行时)
-    from agent import CustomReactAgent
-
-# 配置日志
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# 全局Agent实例
-_agent_instance: Optional[CustomReactAgent] = None
-_redis_client: Optional[redis.Redis] = None
-
-def validate_request_data(data: Dict[str, Any]) -> Dict[str, Any]:
-    """验证请求数据,并支持从thread_id中推断user_id"""
-    errors = []
-    
-    # 验证 question(必填)
-    question = data.get('question', '')
-    if not question or not question.strip():
-        errors.append('问题不能为空')
-    elif len(question) > 2000:
-        errors.append('问题长度不能超过2000字符')
-    
-    # 优先获取 thread_id
-    thread_id = data.get('thread_id') or data.get('conversation_id')
-    
-    # 获取 user_id,但暂不设置默认值
-    user_id = data.get('user_id')
-
-    # 如果没有传递 user_id,则尝试从 thread_id 中推断
-    if not user_id:
-        if thread_id and ':' in thread_id:
-            inferred_user_id = thread_id.split(':', 1)[0]
-            if inferred_user_id:
-                user_id = inferred_user_id
-                logger.info(f"👤 未提供user_id,从 thread_id '{thread_id}' 中推断出: '{user_id}'")
-            else:
-                # 如果拆分结果为空,则使用默认值
-                user_id = 'guest'
-        else:
-            # 如果 thread_id 不存在或格式不正确,则使用默认值
-            user_id = 'guest'
-
-    # 验证 user_id 长度
-    if user_id and len(user_id) > 50:
-        errors.append('用户ID长度不能超过50字符')
-    
-    # 用户ID与会话ID一致性校验
-    if thread_id:
-        if ':' not in thread_id:
-            errors.append('会话ID格式无效,期望格式为 user_id:timestamp')
-        else:
-            thread_user_id = thread_id.split(':', 1)[0]
-            if thread_user_id != user_id:
-                errors.append(f'会话归属验证失败:会话ID [{thread_id}] 不属于当前用户 [{user_id}]')
-    
-    if errors:
-        raise ValueError('; '.join(errors))
-    
-    return {
-        'question': question.strip(),
-        'user_id': user_id,
-        'thread_id': thread_id  # 可选,不传则自动生成新会话
-    }
-
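The user_id inference above reduces to a small pure function on the "user_id:timestamp" thread format; standalone sketch:

from typing import Optional

def infer_user_id(thread_id: Optional[str], default: str = "guest") -> str:
    if thread_id and ':' in thread_id:
        return thread_id.split(':', 1)[0] or default
    return default

assert infer_user_id("wang:20250709195048728") == "wang"
assert infer_user_id(None) == "guest"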
-async def initialize_agent():
-    """异步初始化Agent"""
-    global _agent_instance, _redis_client
-    
-    if _agent_instance is None:
-        logger.info("🚀 正在异步初始化 Custom React Agent...")
-        try:
-            # 设置环境变量(checkpointer内部需要)
-            os.environ['REDIS_URL'] = 'redis://localhost:6379'
-            
-            # 初始化共享的Redis客户端
-            _redis_client = redis.from_url('redis://localhost:6379', decode_responses=True)
-            await _redis_client.ping()
-            logger.info("✅ Redis客户端连接成功")
-            
-            _agent_instance = await CustomReactAgent.create()
-            logger.info("✅ Agent 异步初始化完成")
-        except Exception as e:
-            logger.error(f"❌ Agent 异步初始化失败: {e}")
-            raise
-
-async def ensure_agent_ready():
-    """异步确保Agent实例可用"""
-    global _agent_instance
-    
-    if _agent_instance is None:
-        await initialize_agent()
-    
-    # 测试Agent是否还可用
-    try:
-        # 简单测试 - 尝试获取一个不存在用户的对话(应该返回空列表)
-        test_result = await _agent_instance.get_user_recent_conversations("__test__", 1)
-        return True
-    except Exception as e:
-        logger.warning(f"⚠️ Agent实例不可用: {e}")
-        # 重新创建Agent实例
-        _agent_instance = None
-        await initialize_agent()
-        return True
-
-# 删除复杂的事件循环管理函数 - 不再需要
-
-async def cleanup_agent():
-    """异步清理Agent资源"""
-    global _agent_instance, _redis_client
-    
-    if _agent_instance:
-        await _agent_instance.close()
-        logger.info("✅ Agent 资源已异步清理")
-        _agent_instance = None
-    
-    if _redis_client:
-        await _redis_client.aclose()
-        logger.info("✅ Redis客户端已异步关闭")
-        _redis_client = None
-
-# 创建Flask应用
-app = Flask(__name__)
-
-# 简化的退出处理
-def cleanup_on_exit():
-    """程序退出时的清理函数"""
-    logger.info("程序退出,资源清理将在异步上下文中进行")
-
-atexit.register(cleanup_on_exit)
-
-@app.route("/")
-def root():
-    """健康检查端点"""
-    return jsonify({"message": "Custom React Agent API 服务正在运行"})
-
-@app.route('/health', methods=['GET'])
-def health_check():
-    """健康检查端点"""
-    try:
-        health_status = {
-            "status": "healthy",
-            "agent_initialized": _agent_instance is not None,
-            "timestamp": datetime.now().isoformat()
-        }
-        return jsonify(health_status), 200
-    except Exception as e:
-        logger.error(f"健康检查失败: {e}")
-        return jsonify({"status": "unhealthy", "error": str(e)}), 500
-
-@app.route("/api/chat", methods=["POST"])
-async def chat_endpoint():
-    """异步智能问答接口"""
-    global _agent_instance
-    
-    # 确保Agent已初始化
-    if not await ensure_agent_ready():
-        return jsonify({
-            "code": 503,
-            "message": "服务未就绪",
-            "success": False,
-            "error": "Agent 初始化失败"
-        }), 503
-    
-    try:
-        # 获取请求数据,处理JSON解析错误
-        try:
-            data = request.get_json(force=True)
-        except Exception as json_error:
-            logger.warning(f"⚠️ JSON解析失败: {json_error}")
-            return jsonify({
-                "code": 400,
-                "message": "请求格式错误",
-                "success": False,
-                "error": "无效的JSON格式,请检查请求体中是否存在语法错误(如多余的逗号、引号不匹配等)",
-                "details": str(json_error)
-            }), 400
-        
-        if not data:
-            return jsonify({
-                "code": 400,
-                "message": "请求参数错误",
-                "success": False,
-                "error": "请求体不能为空"
-            }), 400
-        
-        # 验证请求数据
-        validated_data = validate_request_data(data)
-        
-        logger.info(f"📨 收到请求 - User: {validated_data['user_id']}, Question: {validated_data['question'][:50]}...")
-        
-        # 直接调用异步方法,不需要事件循环包装
-        agent_result = await _agent_instance.chat(
-            message=validated_data['question'],
-            user_id=validated_data['user_id'],
-            thread_id=validated_data['thread_id']
-        )
-        
-        if not agent_result.get("success", False):
-            # Agent处理失败
-            error_msg = agent_result.get("error", "Agent处理失败")
-            logger.error(f"❌ Agent处理失败: {error_msg}")
-            
-            return jsonify({
-                "code": 500,
-                "message": "处理失败",
-                "success": False,
-                "error": error_msg,
-                "data": {
-                    "conversation_id": agent_result.get("thread_id"),  # 新增:conversation_id等于thread_id
-                    "user_id": validated_data['user_id'],  # 新增:返回用户ID
-                    "react_agent_meta": {
-                        "thread_id": agent_result.get("thread_id"),
-                        "agent_version": "custom_react_v1_async",
-                        "execution_path": ["error"]
-                    },
-                    "timestamp": datetime.now().isoformat()
-                }
-            }), 500
-        
-        # Agent处理成功,按照设计文档格式化响应
-        api_data = agent_result.get("api_data", {})
-        
-        # 构建符合设计文档的响应数据
-        response_data = {
-            "response": api_data.get("response", ""),
-            "conversation_id": agent_result.get("thread_id"),  # 新增:conversation_id等于thread_id
-            "user_id": validated_data['user_id'],  # 新增:返回用户ID
-            "react_agent_meta": api_data.get("react_agent_meta", {
-                "thread_id": agent_result.get("thread_id"),
-                "agent_version": "custom_react_v1"
-            }),
-            "timestamp": datetime.now().isoformat()
-        }
-        
-        # 可选字段:SQL(仅当执行SQL时存在)
-        if "sql" in api_data:
-            response_data["sql"] = api_data["sql"]
-        
-        # 可选字段:records(仅当有查询结果时存在)
-        if "records" in api_data:
-            response_data["records"] = api_data["records"]
-        
-        logger.info(f"✅ 请求处理成功 - Thread: {response_data['react_agent_meta'].get('thread_id')}")
-        
-        return jsonify({
-            "code": 200,
-            "message": "操作成功",
-            "success": True,
-            "data": response_data
-        })
-        
-    except ValueError as e:
-        # 参数验证错误
-        error_msg = str(e)
-        logger.warning(f"⚠️ 参数验证失败: {error_msg}")
-        
-        # 根据错误类型提供更友好的消息
-        if "会话归属验证失败" in error_msg:
-            message = "会话归属验证失败"
-        elif "会话ID格式无效" in error_msg:
-            message = "会话ID格式无效"
-        elif "JSON格式" in error_msg:
-            message = "请求格式错误"
-        else:
-            message = "请求参数错误"
-        
-        return jsonify({
-            "code": 400,
-            "message": message,
-            "success": False,
-            "error": error_msg,
-            "error_type": "validation_error"
-        }), 400
-        
-    except Exception as e:
-        # 其他未预期的错误
-        logger.error(f"❌ 未预期的错误: {e}", exc_info=True)
-        return jsonify({
-            "code": 500,
-            "message": "服务器内部错误", 
-            "success": False,
-            "error": "系统异常,请稍后重试"
-        }), 500
-
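A typical call against this endpoint, assuming the service is running locally on port 8000 as in the __main__ block below (sketch using the requests library; the question text is just an example):

import requests

resp = requests.post(
    "http://localhost:8000/api/chat",
    json={
        "question": "请问每个服务区的收入是多少?",  # required
        "user_id": "wang",                           # optional; inferred from thread_id if omitted
        # "thread_id": "wang:20250709195048728",    # optional; omit to start a new conversation
    },
    timeout=120,
)
body = resp.json()
print(body["code"], body["success"])
print(body["data"]["response"])            # natural-language answer
print(body["data"]["conversation_id"])     # equals the thread_id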
-@app.route('/api/v0/react/users/<user_id>/conversations', methods=['GET'])
-async def get_user_conversations(user_id: str):
-    """异步获取用户的聊天记录列表"""
-    global _agent_instance
-    
-    try:
-        # 获取查询参数
-        limit = request.args.get('limit', 10, type=int)
-        
-        # 限制limit的范围
-        limit = max(1, min(limit, 50))  # 限制在1-50之间
-        
-        logger.info(f"📋 异步获取用户 {user_id} 的对话列表,限制 {limit} 条")
-        
-        # 确保Agent可用
-        if not await ensure_agent_ready():
-            return jsonify({
-                "success": False,
-                "error": "Agent 未就绪",
-                "timestamp": datetime.now().isoformat()
-            }), 503
-        
-        # 直接调用异步方法
-        conversations = await _agent_instance.get_user_recent_conversations(user_id, limit)
-        
-        return jsonify({
-            "success": True,
-            "data": {
-                "user_id": user_id,
-                "conversations": conversations,
-                "total_count": len(conversations),
-                "limit": limit
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ 异步获取用户 {user_id} 对话列表失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
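And the matching conversation-list call (same local-service assumption):

import requests

r = requests.get(
    "http://localhost:8000/api/v0/react/users/wang/conversations",
    params={"limit": 5},
    timeout=30,
)
for conv in r.json()["data"]["conversations"]:
    print(conv["thread_id"], conv["conversation_title"])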
-@app.route('/api/v0/react/users/<user_id>/conversations/<thread_id>', methods=['GET'])
-async def get_user_conversation_detail(user_id: str, thread_id: str):
-    """异步获取特定对话的详细历史"""
-    global _agent_instance
-    
-    try:
-        # 验证thread_id格式是否匹配user_id
-        if not thread_id.startswith(f"{user_id}:"):
-            return jsonify({
-                "success": False,
-                "error": f"Thread ID {thread_id} 不属于用户 {user_id}",
-                "timestamp": datetime.now().isoformat()
-            }), 400
-        
-        logger.info(f"📖 异步获取用户 {user_id} 的对话 {thread_id} 详情")
-        
-        # 确保Agent可用
-        if not await ensure_agent_ready():
-            return jsonify({
-                "success": False,
-                "error": "Agent 未就绪",
-                "timestamp": datetime.now().isoformat()
-            }), 503
-        
-        # 直接调用异步方法
-        history = await _agent_instance.get_conversation_history(thread_id)
-        logger.info(f"✅ 异步成功获取对话历史,消息数量: {len(history)}")
-        
-        if not history:
-            return jsonify({
-                "success": False,
-                "error": f"未找到对话 {thread_id}",
-                "timestamp": datetime.now().isoformat()
-            }), 404
-        
-        return jsonify({
-            "success": True,
-            "data": {
-                "user_id": user_id,
-                "thread_id": thread_id,
-                "message_count": len(history),
-                "messages": history
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        import traceback
-        logger.error(f"❌ 异步获取对话 {thread_id} 详情失败: {e}")
-        logger.error(f"❌ 详细错误信息: {traceback.format_exc()}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-# 简单Redis查询函数(测试用)
-def get_user_conversations_simple_sync(user_id: str, limit: int = 10):
-    """直接从Redis获取用户对话,测试版本"""
-    import redis
-    import json
-    
-    try:
-        # 创建Redis连接
-        redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        redis_client.ping()
-        
-        # 扫描用户的checkpoint keys
-        pattern = f"checkpoint:{user_id}:*"
-        logger.info(f"🔍 扫描模式: {pattern}")
-        
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        logger.info(f"📋 找到 {len(keys)} 个keys")
-        
-        # 解析thread信息
-        thread_data = {}
-        for key in keys:
-            try:
-                parts = key.split(':')
-                if len(parts) >= 4:
-                    thread_id = f"{parts[1]}:{parts[2]}"  # user_id:timestamp
-                    timestamp = parts[2]
-                    
-                    if thread_id not in thread_data:
-                        thread_data[thread_id] = {
-                            "thread_id": thread_id,
-                            "timestamp": timestamp,
-                            "keys": []
-                        }
-                    thread_data[thread_id]["keys"].append(key)
-            except Exception as e:
-                logger.warning(f"解析key失败 {key}: {e}")
-                continue
-        
-        logger.info(f"📊 找到 {len(thread_data)} 个thread")
-        
-        # 按时间戳排序
-        sorted_threads = sorted(
-            thread_data.values(),
-            key=lambda x: x["timestamp"],
-            reverse=True
-        )[:limit]
-        
-        # 获取每个thread的详细信息
-        conversations = []
-        for thread_info in sorted_threads:
-            try:
-                thread_id = thread_info["thread_id"]
-                
-                # 获取最新的checkpoint数据
-                latest_key = max(thread_info["keys"])
-                
-                # 先检查key的数据类型
-                key_type = redis_client.type(latest_key)
-                logger.info(f"🔍 Key {latest_key} 的类型: {key_type}")
-                
-                data = None
-                if key_type == 'string':
-                    data = redis_client.get(latest_key)
-                elif key_type == 'hash':
-                    # 如果是hash类型,获取所有字段
-                    hash_data = redis_client.hgetall(latest_key)
-                    logger.info(f"🔍 Hash字段: {list(hash_data.keys())}")
-                    # 尝试获取可能的数据字段
-                    for field in ['data', 'state', 'value', 'checkpoint']:
-                        if field in hash_data:
-                            data = hash_data[field]
-                            break
-                    if not data and hash_data:
-                        # 如果没找到预期字段,取第一个值试试
-                        data = list(hash_data.values())[0]
-                elif key_type == 'list':
-                    # 如果是list类型,获取最后一个元素
-                    data = redis_client.lindex(latest_key, -1)
-                elif key_type == 'ReJSON-RL':
-                    # 这是RedisJSON类型,使用JSON.GET命令
-                    logger.info(f"🔍 使用JSON.GET获取RedisJSON数据")
-                    try:
-                        # 使用JSON.GET命令获取整个JSON对象
-                        json_data = redis_client.execute_command('JSON.GET', latest_key)
-                        if json_data:
-                            data = json_data  # JSON.GET返回的就是JSON字符串
-                            logger.info(f"🔍 JSON数据长度: {len(data)} 字符")
-                        else:
-                            logger.warning(f"⚠️ JSON.GET 返回空数据")
-                            continue
-                    except Exception as json_error:
-                        logger.error(f"❌ JSON.GET 失败: {json_error}")
-                        continue
-                else:
-                    logger.warning(f"⚠️ 未知的key类型: {key_type}")
-                    continue
-                
-                if data:
-                    try:
-                        checkpoint_data = json.loads(data)
-                        
-                        # 调试:查看JSON数据结构
-                        logger.info(f"🔍 JSON顶级keys: {list(checkpoint_data.keys())}")
-                        
-                        # 根据您提供的JSON结构,消息在 checkpoint.channel_values.messages
-                        messages = []
-                        
-                        # 首先检查是否有checkpoint字段
-                        if 'checkpoint' in checkpoint_data:
-                            checkpoint = checkpoint_data['checkpoint']
-                            if isinstance(checkpoint, dict) and 'channel_values' in checkpoint:
-                                channel_values = checkpoint['channel_values']
-                                if isinstance(channel_values, dict) and 'messages' in channel_values:
-                                    messages = channel_values['messages']
-                                    logger.info(f"🔍 找到messages: {len(messages)} 条消息")
-                        
-                        # 如果没有checkpoint字段,尝试直接在channel_values
-                        if not messages and 'channel_values' in checkpoint_data:
-                            channel_values = checkpoint_data['channel_values']
-                            if isinstance(channel_values, dict) and 'messages' in channel_values:
-                                messages = channel_values['messages']
-                                logger.info(f"🔍 找到messages(直接路径): {len(messages)} 条消息")
-                        
-                        # 生成对话预览
-                        preview = "空对话"
-                        if messages:
-                            for msg in messages:
-                                # 处理LangChain消息格式:{"lc": 1, "type": "constructor", "id": ["langchain", "schema", "messages", "HumanMessage"], "kwargs": {"content": "...", "type": "human"}}
-                                if isinstance(msg, dict):
-                                    # 检查是否是LangChain格式的HumanMessage
-                                    if (msg.get('lc') == 1 and 
-                                        msg.get('type') == 'constructor' and 
-                                        'id' in msg and 
-                                        isinstance(msg['id'], list) and 
-                                        len(msg['id']) >= 4 and
-                                        msg['id'][3] == 'HumanMessage' and
-                                        'kwargs' in msg):
-                                        
-                                        kwargs = msg['kwargs']
-                                        if kwargs.get('type') == 'human' and 'content' in kwargs:
-                                            content = str(kwargs['content'])
-                                            preview = content[:50] + "..." if len(content) > 50 else content
-                                            break
-                                    # 兼容其他格式
-                                    elif msg.get('type') == 'human' and 'content' in msg:
-                                        content = str(msg['content'])
-                                        preview = content[:50] + "..." if len(content) > 50 else content
-                                        break
-                        
-                        conversations.append({
-                            "thread_id": thread_id,
-                            "user_id": user_id,
-                            "timestamp": thread_info["timestamp"],
-                            "message_count": len(messages),
-                            "conversation_preview": preview
-                        })
-                        
-                    except json.JSONDecodeError:
-                        logger.error(f"❌ JSON解析失败,数据类型: {type(data)}, 长度: {len(str(data))}")
-                        logger.error(f"❌ 数据开头: {str(data)[:200]}...")
-                        continue
-                    
-            except Exception as e:
-                logger.error(f"处理thread {thread_info['thread_id']} 失败: {e}")
-                continue
-        
-        redis_client.close()
-        logger.info(f"✅ 返回 {len(conversations)} 个对话")
-        return conversations
-        
-    except Exception as e:
-        logger.error(f"❌ Redis查询失败: {e}")
-        return []
-
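The LangChain-serialized message shape handled above (the {"lc": 1, "type": "constructor", ...} dicts) can be unpacked with a small helper like this; illustrative only:

def human_preview(messages, max_len=50):
    """Preview of the first human message, for serialized LangChain dicts or plain {"type": "human"} dicts."""
    for msg in messages:
        if not isinstance(msg, dict):
            continue
        if msg.get("lc") == 1 and msg.get("type") == "constructor" \
                and (msg.get("id") or [])[-1:] == ["HumanMessage"]:
            content = str(msg.get("kwargs", {}).get("content", ""))
        elif msg.get("type") == "human":
            content = str(msg.get("content", ""))
        else:
            continue
        return content[:max_len] + "..." if len(content) > max_len else content
    return "空对话"

sample = [{"lc": 1, "type": "constructor",
           "id": ["langchain", "schema", "messages", "HumanMessage"],
           "kwargs": {"type": "human", "content": "请问每个服务区的收入是多少?"}}]
print(human_preview(sample))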
-@app.route('/api/test/redis', methods=['GET'])
-def test_redis_connection():
-    """测试Redis连接和基本查询"""
-    try:
-        import redis
-        
-        # 创建Redis连接
-        r = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        r.ping()
-        
-        # 扫描checkpoint keys
-        pattern = "checkpoint:*"
-        keys = []
-        cursor = 0
-        count = 0
-        
-        while True:
-            cursor, batch = r.scan(cursor=cursor, match=pattern, count=100)
-            keys.extend(batch)
-            count += len(batch)
-            if cursor == 0 or count > 500:  # 限制扫描数量
-                break
-        
-        # 统计用户
-        users = {}
-        for key in keys:
-            try:
-                parts = key.split(':')
-                if len(parts) >= 2:
-                    user_id = parts[1]
-                    users[user_id] = users.get(user_id, 0) + 1
-            except Exception:
-                continue
-        
-        r.close()
-        
-        return jsonify({
-            "success": True,
-            "data": {
-                "redis_connected": True,
-                "total_checkpoint_keys": len(keys),
-                "users_found": list(users.keys()),
-                "user_key_counts": users,
-                "sample_keys": keys[:5] if keys else []
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ Redis测试失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-@app.route('/api/v0/react/direct/users/<user_id>/conversations', methods=['GET'])
-def test_get_user_conversations_simple(user_id: str):
-    """测试简单Redis查询获取用户对话列表"""
-    try:
-        limit = request.args.get('limit', 10, type=int)
-        limit = max(1, min(limit, 50))
-        
-        logger.info(f"🧪 测试获取用户 {user_id} 的对话列表(简单Redis方式)")
-        
-        # 使用简单Redis查询
-        conversations = get_user_conversations_simple_sync(user_id, limit)
-        
-        return jsonify({
-            "success": True,
-            "method": "simple_redis_query",
-            "data": {
-                "user_id": user_id,
-                "conversations": conversations,
-                "total_count": len(conversations),
-                "limit": limit
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ 测试简单Redis查询失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-    
-
-# 在 api.py 文件顶部的导入部分添加:
-from enhanced_redis_api import get_conversation_detail_from_redis
-
-# 在 api.py 文件中添加以下新路由:
-
-@app.route('/api/v0/react/direct/conversations/<thread_id>', methods=['GET'])
-def get_conversation_detail_api(thread_id: str):
-    """
-    获取特定对话的详细信息 - 支持include_tools开关参数
-    
-    Query Parameters:
-        - include_tools: bool, 是否包含工具调用信息,默认false
-                        true: 返回完整对话(human/ai/tool/system)
-                        false: 只返回human/ai消息,清理工具调用信息
-        - user_id: str, 可选的用户ID验证
-        
-    Examples:
-        GET /api/conversations/wang:20250709195048728?include_tools=true   # 完整模式
-        GET /api/v0/react/direct/conversations/wang:20250709195048728?include_tools=true   # 完整模式
-        GET /api/v0/react/direct/conversations/wang:20250709195048728?include_tools=false  # 简化模式(默认)
-        GET /api/v0/react/direct/conversations/wang:20250709195048728                      # 简化模式(默认)
-    try:
-        # 获取查询参数
-        include_tools = request.args.get('include_tools', 'false').lower() == 'true'
-        user_id = request.args.get('user_id')
-        
-        # 验证thread_id格式
-        if ':' not in thread_id:
-            return jsonify({
-                "success": False,
-                "error": "Invalid thread_id format. Expected format: user_id:timestamp",
-                "timestamp": datetime.now().isoformat()
-            }), 400
-        
-        # 如果提供了user_id,验证thread_id是否属于该用户
-        thread_user_id = thread_id.split(':')[0]
-        if user_id and thread_user_id != user_id:
-            return jsonify({
-                "success": False,
-                "error": f"Thread ID {thread_id} does not belong to user {user_id}",
-                "timestamp": datetime.now().isoformat()
-            }), 400
-        
-        logger.info(f"📖 获取对话详情 - Thread: {thread_id}, Include Tools: {include_tools}")
-        
-        # 从Redis获取对话详情(使用我们的新函数)
-        result = get_conversation_detail_from_redis(thread_id, include_tools)
-        
-        if not result['success']:
-            logger.warning(f"⚠️ 获取对话详情失败: {result['error']}")
-            return jsonify({
-                "success": False,
-                "error": result['error'],
-                "timestamp": datetime.now().isoformat()
-            }), 404
-        
-        # 添加API元数据
-        result['data']['api_metadata'] = {
-            "timestamp": datetime.now().isoformat(),
-            "api_version": "v1",
-            "endpoint": "get_conversation_detail",
-            "query_params": {
-                "include_tools": include_tools,
-                "user_id": user_id
-            }
-        }
-        
-        mode_desc = "完整模式" if include_tools else "简化模式"
-        logger.info(f"✅ 成功获取对话详情 - Messages: {result['data']['message_count']}, Mode: {mode_desc}")
-        
-        return jsonify({
-            "success": True,
-            "data": result['data'],
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        import traceback
-        logger.error(f"❌ 获取对话详情异常: {e}")
-        logger.error(f"❌ 详细错误信息: {traceback.format_exc()}")
-        
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-@app.route('/api/v0/react/direct/conversations/<thread_id>/compare', methods=['GET'])
-def compare_conversation_modes_api(thread_id: str):
-    """
-    比较完整模式和简化模式的对话内容
-    用于调试和理解两种模式的差异
-    
-    Examples:
-        GET /api/v0/react/direct/conversations/wang:20250709195048728/compare
-    """
-    try:
-        logger.info(f"🔍 比较对话模式 - Thread: {thread_id}")
-        
-        # 获取完整模式
-        full_result = get_conversation_detail_from_redis(thread_id, include_tools=True)
-        
-        # 获取简化模式
-        simple_result = get_conversation_detail_from_redis(thread_id, include_tools=False)
-        
-        if not (full_result['success'] and simple_result['success']):
-            return jsonify({
-                "success": False,
-                "error": "无法获取对话数据进行比较",
-                "timestamp": datetime.now().isoformat()
-            }), 404
-        
-        # 构建比较结果
-        comparison = {
-            "thread_id": thread_id,
-            "full_mode": {
-                "message_count": full_result['data']['message_count'],
-                "stats": full_result['data']['stats'],
-                "sample_messages": full_result['data']['messages'][:3]  # 只显示前3条作为示例
-            },
-            "simple_mode": {
-                "message_count": simple_result['data']['message_count'],
-                "stats": simple_result['data']['stats'],
-                "sample_messages": simple_result['data']['messages'][:3]  # 只显示前3条作为示例
-            },
-            "comparison_summary": {
-                "message_count_difference": full_result['data']['message_count'] - simple_result['data']['message_count'],
-                "tools_filtered_out": full_result['data']['stats'].get('tool_messages', 0),
-                "ai_messages_with_tools": full_result['data']['stats'].get('messages_with_tools', 0),
-                "filtering_effectiveness": "有效" if (full_result['data']['message_count'] - simple_result['data']['message_count']) > 0 else "无差异"
-            },
-            "metadata": {
-                "timestamp": datetime.now().isoformat(),
-                "note": "sample_messages 只显示前3条消息作为示例,完整消息请使用相应的详情API"
-            }
-        }
-        
-        logger.info(f"✅ 模式比较完成 - 完整: {comparison['full_mode']['message_count']}, 简化: {comparison['simple_mode']['message_count']}")
-        
-        return jsonify({
-            "success": True,
-            "data": comparison,
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ 对话模式比较失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-@app.route('/api/v0/react/direct/conversations/<thread_id>/summary', methods=['GET'])
-def get_conversation_summary_api(thread_id: str):
-    """
-    获取对话摘要信息(只包含基本统计,不返回具体消息)
-    
-    Query Parameters:
-        - include_tools: bool, 影响统计信息的计算方式
-        
-    Examples:
-        GET /api/v0/react/direct/conversations/wang:20250709195048728/summary?include_tools=true
-    """
-    try:
-        include_tools = request.args.get('include_tools', 'false').lower() == 'true'
-        
-        # 验证thread_id格式
-        if ':' not in thread_id:
-            return jsonify({
-                "success": False,
-                "error": "Invalid thread_id format. Expected format: user_id:timestamp",
-                "timestamp": datetime.now().isoformat()
-            }), 400
-        
-        logger.info(f"📊 获取对话摘要 - Thread: {thread_id}, Include Tools: {include_tools}")
-        
-        # 获取完整对话信息
-        result = get_conversation_detail_from_redis(thread_id, include_tools)
-        
-        if not result['success']:
-            return jsonify({
-                "success": False,
-                "error": result['error'],
-                "timestamp": datetime.now().isoformat()
-            }), 404
-        
-        # 只返回摘要信息,不包含具体消息
-        data = result['data']
-        summary = {
-            "thread_id": data['thread_id'],
-            "user_id": data['user_id'],
-            "include_tools": data['include_tools'],
-            "message_count": data['message_count'],
-            "stats": data['stats'],
-            "metadata": data['metadata'],
-            "first_message_preview": None,
-            "last_message_preview": None,
-            "conversation_preview": None
-        }
-        
-        # 添加消息预览
-        messages = data.get('messages', [])
-        if messages:
-            # 第一条human消息预览
-            for msg in messages:
-                if msg['type'] == 'human':
-                    content = str(msg['content'])
-                    summary['first_message_preview'] = content[:100] + "..." if len(content) > 100 else content
-                    break
-            
-            # 最后一条ai消息预览
-            for msg in reversed(messages):
-                if msg['type'] == 'ai' and msg.get('content', '').strip():
-                    content = str(msg['content'])
-                    summary['last_message_preview'] = content[:100] + "..." if len(content) > 100 else content
-                    break
-            
-            # 生成对话预览(第一条human消息)
-            summary['conversation_preview'] = summary['first_message_preview']
-        
-        # 添加API元数据
-        summary['api_metadata'] = {
-            "timestamp": datetime.now().isoformat(),
-            "api_version": "v1",
-            "endpoint": "get_conversation_summary"
-        }
-        
-        logger.info(f"✅ 成功获取对话摘要")
-        
-        return jsonify({
-            "success": True,
-            "data": summary,
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ 获取对话摘要失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-
-# 为了支持独立运行
-if __name__ == "__main__":
-    try:
-        # 尝试使用ASGI模式启动(推荐)
-        import uvicorn
-        from asgiref.wsgi import WsgiToAsgi
-        
-        logger.info("🚀 使用ASGI模式启动异步Flask应用...")
-        logger.info("   这将解决事件循环冲突问题,支持LangGraph异步checkpoint保存")
-        
-        # 将Flask WSGI应用转换为ASGI应用
-        asgi_app = WsgiToAsgi(app)
-        
-        # 信号处理
-        import signal
-        
-        def signal_handler(signum, frame):
-            logger.info("🛑 收到关闭信号,开始清理...")
-            print("正在关闭服务...")
-            exit(0)
-        
-        signal.signal(signal.SIGINT, signal_handler)
-        signal.signal(signal.SIGTERM, signal_handler)
-        
-        # 使用uvicorn启动ASGI应用
-        uvicorn.run(
-            asgi_app,
-            host="0.0.0.0",
-            port=8000,
-            log_level="info",
-            access_log=True
-        )
-        
-    except ImportError as e:
-        # 如果缺少ASGI依赖,fallback到传统Flask模式
-        logger.warning("⚠️ ASGI依赖缺失,使用传统Flask模式启动")
-        logger.warning("   建议安装: pip install uvicorn asgiref")
-        logger.warning("   传统模式可能存在异步事件循环冲突问题")
-        
-        # 信号处理
-        import signal
-        
-        def signal_handler(signum, frame):
-            logger.info("🛑 收到关闭信号,开始清理...")
-            print("正在关闭服务...")
-            exit(0)
-        
-        signal.signal(signal.SIGINT, signal_handler)
-        signal.signal(signal.SIGTERM, signal_handler)
-        
-        # 启动传统Flask应用
-        app.run(host="0.0.0.0", port=8000, debug=False, threaded=True) 

+ 0 - 14
test/custom_react_agent/asgi_app.py

@@ -1,14 +0,0 @@
-"""
-ASGI应用启动文件
-提供独立的ASGI启动选项,用于生产环境或uvicorn命令行启动
-"""
-from asgiref.wsgi import WsgiToAsgi
-from api import app
-
-# 将Flask WSGI应用转换为ASGI应用
-asgi_app = WsgiToAsgi(app)
-
-# 这个文件可以通过以下方式启动:
-# uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000
-# 或
-# uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000 --reload

+ 0 - 262
test/custom_react_agent/bak/simple_redis_api.py

@@ -1,262 +0,0 @@
-"""
-简单Redis查询API函数,替换复杂的LangGraph方法
-"""
-import redis
-import json
-from typing import List, Dict, Any
-from datetime import datetime
-
-def get_user_conversations_simple_sync(user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
-    """
-    直接从Redis获取用户对话,不使用LangGraph
-    同步版本,避免事件循环问题
-    """
-    try:
-        # 创建Redis连接
-        redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        
-        # 测试连接
-        redis_client.ping()
-        
-        # 扫描用户的checkpoint keys
-        pattern = f"checkpoint:{user_id}:*"
-        print(f"🔍 扫描模式: {pattern}")
-        
-        # 获取所有匹配的keys
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        print(f"📋 找到 {len(keys)} 个keys")
-        
-        # 解析thread信息
-        thread_data = {}
-        for key in keys:
-            try:
-                # key格式: checkpoint:user_id:timestamp:status:uuid
-                parts = key.split(':')
-                if len(parts) >= 4:
-                    thread_id = f"{parts[1]}:{parts[2]}"  # user_id:timestamp
-                    timestamp = parts[2]
-                    
-                    if thread_id not in thread_data:
-                        thread_data[thread_id] = {
-                            "thread_id": thread_id,
-                            "timestamp": timestamp,
-                            "keys": []
-                        }
-                    
-                    thread_data[thread_id]["keys"].append(key)
-                    
-            except Exception as e:
-                print(f"解析key失败 {key}: {e}")
-                continue
-        
-        print(f"📊 找到 {len(thread_data)} 个thread")
-        
-        # 按时间戳排序
-        sorted_threads = sorted(
-            thread_data.values(),
-            key=lambda x: x["timestamp"],
-            reverse=True
-        )[:limit]
-        
-        # 获取每个thread的详细信息
-        conversations = []
-        for thread_info in sorted_threads:
-            try:
-                thread_id = thread_info["thread_id"]
-                
-                # 获取该thread的最新checkpoint数据
-                latest_key = None
-                for key in thread_info["keys"]:
-                    if latest_key is None or key > latest_key:
-                        latest_key = key
-                
-                if latest_key:
-                    # 直接从Redis获取数据
-                    data = redis_client.get(latest_key)
-                    if data:
-                        try:
-                            # 尝试解析JSON数据
-                            checkpoint_data = json.loads(data)
-                            
-                            # 提取消息信息
-                            messages = checkpoint_data.get('channel_values', {}).get('messages', [])
-                            
-                            # 生成对话预览
-                            preview = "空对话"
-                            if messages:
-                                for msg in messages:
-                                    # 处理不同的消息格式
-                                    if isinstance(msg, dict):
-                                        msg_type = msg.get('type', '')
-                                        if msg_type == 'human':
-                                            content = str(msg.get('content', ''))
-                                            preview = content[:50] + "..." if len(content) > 50 else content
-                                            break
-                                    elif hasattr(msg, 'content') and hasattr(msg, '__class__'):
-                                        # LangChain消息对象
-                                        if msg.__class__.__name__ == 'HumanMessage':
-                                            content = str(msg.content)
-                                            preview = content[:50] + "..." if len(content) > 50 else content
-                                            break
-                            
-                            conversations.append({
-                                "thread_id": thread_id,
-                                "user_id": user_id,
-                                "timestamp": thread_info["timestamp"],
-                                "message_count": len(messages),
-                                "conversation_preview": preview,
-                                "formatted_time": format_timestamp_simple(thread_info["timestamp"])
-                            })
-                            
-                        except json.JSONDecodeError:
-                            print(f"❌ 解析JSON失败: {latest_key}")
-                            continue
-                        except Exception as e:
-                            print(f"❌ 处理数据失败: {e}")
-                            continue
-                    
-            except Exception as e:
-                print(f"❌ 处理thread {thread_info['thread_id']} 失败: {e}")
-                continue
-        
-        redis_client.close()
-        print(f"✅ 返回 {len(conversations)} 个对话")
-        return conversations
-        
-    except Exception as e:
-        print(f"❌ Redis查询失败: {e}")
-        return []
-
-def get_conversation_history_simple_sync(thread_id: str) -> List[Dict[str, Any]]:
-    """
-    直接从Redis获取对话历史,不使用LangGraph
-    """
-    try:
-        # 创建Redis连接
-        redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        
-        # 扫描该thread的所有checkpoint keys
-        pattern = f"checkpoint:{thread_id}:*"
-        
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        if not keys:
-            redis_client.close()
-            return []
-        
-        # 获取最新的checkpoint
-        latest_key = max(keys)
-        data = redis_client.get(latest_key)
-        
-        if not data:
-            redis_client.close()
-            return []
-        
-        # 解析数据
-        checkpoint_data = json.loads(data)
-        messages = checkpoint_data.get('channel_values', {}).get('messages', [])
-        
-        # 转换消息格式
-        history = []
-        for msg in messages:
-            if isinstance(msg, dict):
-                # 已经是字典格式
-                msg_type = msg.get('type', 'unknown')
-                if msg_type == 'human':
-                    role = "human"
-                elif msg_type == 'tool':
-                    role = "tool"
-                else:
-                    role = "ai"
-                
-                history.append({
-                    "type": role,
-                    "content": msg.get('content', ''),
-                    "tool_calls": msg.get('tool_calls', None)
-                })
-            elif hasattr(msg, '__class__'):
-                # LangChain消息对象
-                class_name = msg.__class__.__name__
-                if class_name == 'HumanMessage':
-                    role = "human"
-                elif class_name == 'ToolMessage':
-                    role = "tool"
-                else:
-                    role = "ai"
-                
-                history.append({
-                    "type": role,
-                    "content": getattr(msg, 'content', ''),
-                    "tool_calls": getattr(msg, 'tool_calls', None)
-                })
-        
-        redis_client.close()
-        return history
-        
-    except Exception as e:
-        print(f"❌ 获取对话历史失败: {e}")
-        return []
-
-def format_timestamp_simple(timestamp: str) -> str:
-    """格式化时间戳"""
-    try:
-        if len(timestamp) >= 14:
-            year = timestamp[:4]
-            month = timestamp[4:6]
-            day = timestamp[6:8]
-            hour = timestamp[8:10]
-            minute = timestamp[10:12]
-            second = timestamp[12:14]
-            return f"{year}-{month}-{day} {hour}:{minute}:{second}"
-    except Exception:
-        pass
-    return timestamp
-
-# 测试函数
-def test_simple_redis_functions():
-    """测试简单Redis函数"""
-    print("🧪 测试简单Redis函数...")
-    
-    try:
-        # 测试获取对话列表
-        print("1. 测试获取用户对话列表...")
-        conversations = get_user_conversations_simple_sync("doudou", 5)
-        print(f"   结果: {len(conversations)} 个对话")
-        
-        if conversations:
-            for conv in conversations:
-                print(f"   - {conv['thread_id']}: {conv['conversation_preview']}")
-            
-            # 测试获取对话详情
-            print("2. 测试获取对话详情...")
-            first_thread = conversations[0]['thread_id']
-            history = get_conversation_history_simple_sync(first_thread)
-            print(f"   结果: {len(history)} 条消息")
-            
-            for i, msg in enumerate(history[:3]):
-                print(f"   [{i+1}] {msg['type']}: {str(msg['content'])[:50]}...")
-        
-        print("✅ 测试完成")
-        return True
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-if __name__ == "__main__":
-    test_simple_redis_functions() 

+ 0 - 158
test/custom_react_agent/bak/simple_redis_query.py

@@ -1,158 +0,0 @@
-#!/usr/bin/env python3
-"""
-简单的Redis查询脚本,绕过LangGraph的复杂异步机制
-"""
-import asyncio
-import redis
-import json
-from typing import List, Dict, Any
-
-async def get_user_conversations_simple(user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
-    """
-    直接从Redis获取用户对话,不使用LangGraph
-    """
-    # 创建Redis连接
-    redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-    
-    try:
-        # 扫描用户的checkpoint keys
-        pattern = f"checkpoint:{user_id}:*"
-        print(f"🔍 扫描模式: {pattern}")
-        
-        # 获取所有匹配的keys
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        print(f"📋 找到 {len(keys)} 个keys")
-        
-        # 解析thread信息
-        thread_data = {}
-        for key in keys:
-            try:
-                # key格式: checkpoint:user_id:timestamp:status:uuid
-                parts = key.split(':')
-                if len(parts) >= 4:
-                    thread_id = f"{parts[1]}:{parts[2]}"  # user_id:timestamp
-                    timestamp = parts[2]
-                    
-                    if thread_id not in thread_data:
-                        thread_data[thread_id] = {
-                            "thread_id": thread_id,
-                            "timestamp": timestamp,
-                            "keys": []
-                        }
-                    
-                    thread_data[thread_id]["keys"].append(key)
-                    
-            except Exception as e:
-                print(f"解析key失败 {key}: {e}")
-                continue
-        
-        print(f"📊 找到 {len(thread_data)} 个thread")
-        
-        # 按时间戳排序
-        sorted_threads = sorted(
-            thread_data.values(),
-            key=lambda x: x["timestamp"],
-            reverse=True
-        )[:limit]
-        
-        # 获取每个thread的详细信息
-        conversations = []
-        for thread_info in sorted_threads:
-            try:
-                thread_id = thread_info["thread_id"]
-                
-                # 获取该thread的最新checkpoint数据
-                latest_key = None
-                for key in thread_info["keys"]:
-                    if latest_key is None or key > latest_key:
-                        latest_key = key
-                
-                if latest_key:
-                    # 直接从Redis获取数据
-                    data = redis_client.get(latest_key)
-                    if data:
-                        try:
-                            # 尝试解析JSON数据
-                            checkpoint_data = json.loads(data)
-                            
-                            # 提取消息信息
-                            messages = checkpoint_data.get('channel_values', {}).get('messages', [])
-                            
-                            # 生成对话预览
-                            preview = "空对话"
-                            if messages:
-                                for msg in messages:
-                                    if isinstance(msg, dict) and msg.get('type') == 'human':
-                                        content = str(msg.get('content', ''))
-                                        preview = content[:50] + "..." if len(content) > 50 else content
-                                        break
-                            
-                            conversations.append({
-                                "thread_id": thread_id,
-                                "user_id": user_id,
-                                "timestamp": thread_info["timestamp"],
-                                "message_count": len(messages),
-                                "conversation_preview": preview,
-                                "formatted_time": format_timestamp(thread_info["timestamp"])
-                            })
-                            
-                        except json.JSONDecodeError:
-                            print(f"❌ 解析JSON失败: {latest_key}")
-                            continue
-                        except Exception as e:
-                            print(f"❌ 处理数据失败: {e}")
-                            continue
-                    
-            except Exception as e:
-                print(f"❌ 处理thread {thread_info['thread_id']} 失败: {e}")
-                continue
-        
-        print(f"✅ 返回 {len(conversations)} 个对话")
-        return conversations
-        
-    finally:
-        redis_client.close()
-
-def format_timestamp(timestamp: str) -> str:
-    """格式化时间戳"""
-    try:
-        if len(timestamp) >= 14:
-            year = timestamp[:4]
-            month = timestamp[4:6]
-            day = timestamp[6:8]
-            hour = timestamp[8:10]
-            minute = timestamp[10:12]
-            second = timestamp[12:14]
-            return f"{year}-{month}-{day} {hour}:{minute}:{second}"
-    except Exception:
-        pass
-    return timestamp
-
-async def test_simple_query():
-    """测试简单查询"""
-    print("🧪 测试简单Redis查询...")
-    
-    try:
-        conversations = await get_user_conversations_simple("doudou", 10)
-        print(f"📋 查询结果: {len(conversations)} 个对话")
-        
-        for conv in conversations:
-            print(f"  - {conv['thread_id']}: {conv['conversation_preview']}")
-            
-        return conversations
-        
-    except Exception as e:
-        print(f"❌ 查询失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return []
-
-if __name__ == "__main__":
-    asyncio.run(test_simple_query()) 

+ 0 - 45
test/custom_react_agent/config.py

@@ -1,45 +0,0 @@
-"""
-全局配置文件
-"""
-import os
-import logging
-
-# --- 项目根目录 ---
-# /test/custom_react_agent/config.py -> /
-PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-# --- LLM 配置 ---
-# 在这里写死你的千问API Key
-# qwen-plus
-# qwen3-235b-a22b
-# qwen3-30b-a3b
-QWEN_API_KEY = "sk-db68e37f00974031935395315bfe07f0"
-QWEN_BASE_URL = "https://dashscope.aliyuncs.com/compatible-mode/v1"
-QWEN_MODEL = "qwen-plus"
-
-# --- Redis 配置 ---
-REDIS_URL = "redis://localhost:6379"
-REDIS_ENABLED = True
-
-# --- 日志配置 ---
-LOG_LEVEL = logging.INFO
-LOG_FORMAT = '%(asctime)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s'
-
-# --- Agent 配置 ---
-DEFAULT_USER_ID = "guest"
-
-# --- 网络重试配置 ---
-MAX_RETRIES = 3                    # 最大重试次数(减少以避免与OpenAI客户端冲突)
-RETRY_BASE_DELAY = 3               # 重试基础延迟(秒)
-NETWORK_TIMEOUT = 60               # 网络超时时间(秒)- 增加到60秒以应对长上下文处理
-
-# --- HTTP连接管理配置 ---
-HTTP_MAX_CONNECTIONS = 10          # 最大连接数
-HTTP_MAX_KEEPALIVE_CONNECTIONS = 5 # 最大保持连接数
-HTTP_KEEPALIVE_EXPIRY = 30.0       # Keep-Alive过期时间(秒)- 设置为30秒避免服务器断开
-HTTP_CONNECT_TIMEOUT = 10.0        # 连接超时(秒)
-HTTP_POOL_TIMEOUT = 5.0            # 连接池超时(秒)
-
-# --- 调试配置 ---
-DEBUG_MODE = True                  # 调试模式:True=完整日志,False=简化日志
-MAX_LOG_LENGTH = 1000              # 非调试模式下的最大日志长度 

+ 0 - 20
test/custom_react_agent/db_query_decision_prompt.txt

@@ -1,20 +0,0 @@
-=== 数据库业务范围 ===
-本系统是高速公路服务区商业管理系统,包含以下业务数据:
-
-核心业务实体:
-- 服务区(bss_service_area):服务区基础信息、位置、状态,如"鄱阳湖服务区"、"信丰西服务区"
-- 档口/商铺(bss_branch):档口信息、品类(餐饮/小吃/便利店)、品牌,如"驿美餐饮"、"加水机"
-- 营业数据(bss_business_day_data):每日支付金额、订单数量,包含微信、支付宝、现金等支付方式
-- 车流量(bss_car_day_count):按车型统计的日流量数据,包含客车、货车、过境、危化品等
-- 公司信息(bss_company):服务区管理公司,如"驿美运营公司"
-
-关键业务指标:
-- 支付方式:微信支付(wx)、支付宝支付(zfb)、现金支付(rmb)、行吧支付(xs)、金豆支付(jd)
-- 营业数据:支付金额、订单数量、营业额、收入统计
-- 车流统计:按车型(客车/货车/过境/危化品/城际)的流量分析
-- 经营分析:餐饮、小吃、便利店、整体租赁等品类收入
-- 地理分区:北区、南区、西区、东区、两区
-
-高速线路:
-- 线路信息:大广、昌金、昌栗等高速线路
-- 路段管理:按线路统计服务区分布

+ 0 - 170
test/custom_react_agent/doc/ASGI_启动说明.md

@@ -1,170 +0,0 @@
-# ASGI模式启动说明
-
-## 概述
-
-为了解决Flask与LangGraph异步事件循环冲突问题("Event loop is closed"错误),我们将Flask应用改为使用ASGI适配器启动。这样可以获得真正的异步支持,允许LangGraph的checkpoint保存在请求完成后继续执行。
-
-## 问题背景
-
-原本的错误:
-```
-redisvl.exceptions.RedisSearchError: Unexpected error while searching: Event loop is closed
-```
-
-这个错误发生在Flask路由处理完成后,LangGraph尝试异步保存checkpoint时事件循环已被关闭。
-
-## 解决方案
-
-使用ASGI适配器(`WsgiToAsgi`)将Flask WSGI应用包装为ASGI应用,然后使用uvicorn ASGI服务器运行,获得持久化事件循环支持。
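-
-核心做法的极简示意(与本目录下 asgi_app.py 的写法一致,仅作说明):
-
-```python
-from asgiref.wsgi import WsgiToAsgi
-from api import app            # Flask 的 WSGI 应用
-
-asgi_app = WsgiToAsgi(app)     # 包装为 ASGI 应用,事件循环由 uvicorn 统一维护
-
-# 启动:uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000
-```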
-
-## 安装依赖
-
-```bash
-# 进入项目目录
-cd test/custom_react_agent
-
-# 安装ASGI依赖
-pip install uvicorn asgiref
-
-# 或者安装所有依赖
-pip install -r requirements.txt
-```
-
-## 启动方式
-
-### 方式1:直接运行api.py(推荐)
-
-```bash
-cd test/custom_react_agent
-python api.py
-```
-
-**说明**:
-- 会自动尝试使用ASGI模式启动
-- 如果缺少依赖,会fallback到传统Flask模式
-- 支持Ctrl+C优雅关闭
-
-**启动日志示例**:
-```
-🚀 使用ASGI模式启动异步Flask应用...
-   这将解决事件循环冲突问题,支持LangGraph异步checkpoint保存
-INFO:     Started server process [12345]
-INFO:     Waiting for application startup.
-INFO:     Application startup complete.
-INFO:     Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
-```
-
-### 方式2:使用uvicorn命令行启动
-
-```bash
-cd test/custom_react_agent
-
-# 启动ASGI应用
-uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000
-
-# 开发模式(自动重载)
-uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000 --reload
-
-# 生产模式(多worker)
-uvicorn asgi_app:asgi_app --host 0.0.0.0 --port 8000 --workers 4
-```
-
-### 方式3:传统Flask模式(不推荐)
-
-如果ASGI依赖安装失败,会自动fallback到传统Flask模式:
-
-```
-⚠️ ASGI依赖缺失,使用传统Flask模式启动
-   建议安装: pip install uvicorn asgiref
-   传统模式可能存在异步事件循环冲突问题
-```
-
-## 测试API
-
-启动后,可以测试API:
-
-```bash
-# 测试健康检查
-curl http://localhost:8000/health
-
-# 测试聊天API
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{
-    "user_id": "test_user",
-    "question": "请问下一届足球世界杯在哪里举行?"
-  }'
-```
-
-## 预期效果
-
-1. **完全解决"Event loop is closed"错误**
-2. **LangGraph checkpoint正常保存**
-3. **支持连续多次API调用**
-4. **保持所有现有功能**
-
-## 验证成功
-
-如果修复成功,您应该看到:
-- API响应正常
-- 日志中没有"Event loop is closed"错误
-- 对话状态正确保存
-- 连续请求都能正常处理
-
-## 故障排除
-
-### 如果依然出现事件循环错误:
-
-1. **确认使用ASGI模式**:检查启动日志是否显示"使用ASGI模式启动"
-2. **检查依赖版本**:确保uvicorn和asgiref版本符合要求
-3. **重启服务**:完全重启API服务
-4. **检查Redis连接**:确保Redis服务正常运行
-
-### 常见问题:
-
-**Q: ImportError: No module named 'uvicorn'**
-A: 运行 `pip install uvicorn asgiref`
-
-**Q: 启动时提示权限错误**
-A: 尝试更换端口:`python api.py` 或在代码中修改端口号
-
-**Q: 仍然出现异步错误**
-A: 检查是否真的使用了ASGI模式,查看启动日志确认
-
-## 技术说明
-
-### ASGI vs WSGI
-
-- **WSGI(原来)**:同步协议,每个请求结束后关闭事件循环
-- **ASGI(现在)**:异步协议,保持事件循环活跃,支持异步任务
-
-### WsgiToAsgi适配器
-
-- 无缝将Flask WSGI应用转换为ASGI兼容
-- 保持所有Flask代码不变
-- 获得真正的异步支持
-
-## 文件说明
-
-- `api.py`:主要API文件,包含自动ASGI启动逻辑
-- `asgi_app.py`:独立ASGI应用文件,用于uvicorn命令行启动
-- `requirements.txt`:所需依赖列表
-- `ASGI_启动说明.md`:本文档
-
-## 成功案例
-
-修复后的API调用应该像这样工作:
-
-```bash
-# 第一次请求
-curl -X POST http://localhost:8000/api/chat -H "Content-Type: application/json" -d '{"user_id":"polo","question":"你好"}'
-# ✅ 成功响应
-
-# 第二次请求(之前会失败)
-curl -X POST http://localhost:8000/api/chat -H "Content-Type: application/json" -d '{"user_id":"polo","question":"请问下一届足球世界杯在哪里举行?"}'
-# ✅ 成功响应,无事件循环错误
-```
-
----
-
-如有问题,请检查启动日志和错误信息。

+ 0 - 159
test/custom_react_agent/doc/FLASK_MIGRATION.md

@@ -1,159 +0,0 @@
-# Flask API 迁移说明
-
-## 📋 迁移概述
-
-已将 Custom React Agent API 从 FastAPI 迁移到 Flask,保持了相同的功能和接口,但使用了不同的 Web 框架。
-
-## 🔄 主要变化
-
-### 1. 依赖包变化
-```bash
-# 旧版本 (FastAPI)
-pip install fastapi uvicorn aiohttp
-
-# 新版本 (Flask)
-pip install flask aiohttp
-```
-
-### 2. 框架特性差异
-
-| 特性 | FastAPI | Flask |
-|------|---------|--------|
-| 自动API文档 | ✅ 自动生成 `/docs` | ❌ 无自动文档 |
-| 请求验证 | ✅ Pydantic 自动验证 | ⚠️ 手动验证 |
-| 异步支持 | ✅ 原生支持 | ⚠️ 需要 asyncio.run() |
-| 类型提示 | ✅ 完整支持 | ⚠️ 基础支持 |
-| 性能 | 🚀 更高 | 📊 中等 |
-| 学习曲线 | 📈 中等 | 📊 简单 |
-
-### 3. 代码结构变化
-
-#### 路由定义
-```python
-# FastAPI
-@app.post("/api/chat", response_model=ChatResponse)
-async def chat_endpoint(request: ChatRequest):
-    ...
-
-# Flask
-@app.route("/api/chat", methods=["POST"])
-def chat_endpoint():
-    data = request.get_json()
-    ...
-```
-
-#### 响应格式
-```python
-# FastAPI
-return ChatResponse(code=200, message="成功", data=result)
-
-# Flask
-return jsonify({"code": 200, "message": "成功", "data": result})
-```
-
-#### 错误处理
-```python
-# FastAPI
-raise HTTPException(status_code=400, detail="错误信息")
-
-# Flask
-return jsonify({"error": "错误信息"}), 400
-```
-
-## ✅ 保持不变的功能
-
-1. **API 接口**: 所有端点路径和参数保持不变
-2. **响应格式**: JSON 响应结构完全一致
-3. **功能逻辑**: Agent 处理逻辑无任何变化
-4. **会话管理**: Thread ID 管理机制保持原样
-5. **错误处理**: 错误代码和消息保持一致
-
-## 🚀 启动方式
-
-### Flask 版本启动
-```bash
-# 方式1:直接运行
-python api.py
-
-# 方式2:使用 flask 命令
-export FLASK_APP=api.py
-flask run --host=0.0.0.0 --port=8000
-```
-
-### 测试验证
-```bash
-# 健康检查
-curl http://localhost:8000/health
-
-# 功能测试
-python test_api.py
-```
-
-## 🔧 开发者注意事项
-
-### 1. 异步函数调用
-```python
-# Flask 中调用异步 Agent 方法
-agent_result = asyncio.run(_agent_instance.chat(...))
-```
-
-### 2. 请求数据验证
-```python
-# 手动验证替代 Pydantic
-def validate_request_data(data):
-    errors = []
-    if not data.get('question'):
-        errors.append('问题不能为空')
-    # ... 更多验证
-    if errors:
-        raise ValueError('; '.join(errors))
-```
-
-### 3. CORS 支持
-```python
-# 暂时不启用跨域支持
-# 如果需要跨域支持,可以安装 flask-cors
-# pip install flask-cors
-```
-
-## 📊 性能考虑
-
-1. **单线程处理**: Flask 默认单线程,高并发时需要配置 WSGI 服务器
-2. **内存使用**: 相比 FastAPI 略低
-3. **启动速度**: 更快的启动时间
-4. **开发效率**: 更简单的调试和开发
-
-## 🛠️ 生产部署建议
-
-### 使用 Gunicorn
-```bash
-pip install gunicorn
-gunicorn -w 4 -b 0.0.0.0:8000 api:app
-```
-
-### 使用 uWSGI
-```bash
-pip install uwsgi
-uwsgi --http :8000 --wsgi-file api.py --callable app --workers 4
-```
-
-## 🐛 故障排除
-
-### 常见问题
-
-1. **异步函数调用错误**
-   - 确保使用 `asyncio.run()` 包装异步调用
-
-2. **CORS 错误**
-   - 当前未启用跨域支持
-   - 如需跨域支持,可安装 `pip install flask-cors`
-
-3. **端口占用**
-   ```bash
-   # 查看端口占用
-   netstat -an | grep 8000
-   ```
-
----
-
-**迁移完成**: Flask 版本已完全实现所有 FastAPI 功能,接口保持 100% 兼容。 

+ 0 - 102
test/custom_react_agent/doc/MIGRATION_COMPLETE.md

@@ -1,102 +0,0 @@
-# ✅ Flask API 迁移完成
-
-## 📋 迁移总结
-
-Custom React Agent API 已成功从 FastAPI 迁移到 Flask,所有功能保持完整且兼容。
-
-## 🔄 已完成的修改
-
-### 1. 核心文件
-- ✅ **api.py** - 完全重写为 Flask 实现,支持直接运行
-- ✅ **test_api.py** - 保持测试兼容
-- ✅ **README_API.md** - 更新文档
-- ✅ **QUICKSTART.md** - 更新快速指南
-
-### 2. 新增文件
-- ✅ **FLASK_MIGRATION.md** - 迁移说明文档
-- ✅ **MIGRATION_COMPLETE.md** - 本总结文档
-
-## 🔧 技术变更
-
-### 依赖包变更
-```bash
-# 旧版本
-pip install fastapi uvicorn aiohttp
-
-# 新版本
-pip install flask aiohttp
-```
-
-### 框架特性
-- ✅ **路由系统**: FastAPI 装饰器 → Flask 路由
-- ✅ **请求验证**: Pydantic 模型 → 手动验证函数
-- ✅ **响应格式**: FastAPI 响应模型 → Flask jsonify
-- ✅ **错误处理**: HTTPException → Flask 错误响应
-- ✅ **异步支持**: 原生异步 → asyncio.run() 包装
-- ✅ **CORS 支持**: 内置 → 暂时禁用
-
-## 🚀 启动验证
-
-### 快速启动
-```bash
-cd test/custom_react_agent
-python api.py
-```
-
-### 健康检查
-```bash
-curl http://localhost:8000/health
-```
-
-### 功能测试
-```bash
-python test_api.py
-```
-
-## 📊 兼容性确认
-
-### API 接口
-- ✅ **端点路径**: 保持不变
-- ✅ **请求格式**: JSON 格式一致
-- ✅ **响应结构**: 完全兼容
-- ✅ **错误代码**: 状态码一致
-- ✅ **参数验证**: 验证逻辑保持
-
-### 功能特性
-- ✅ **Agent 处理**: 完全兼容
-- ✅ **Thread ID**: 会话管理保持
-- ✅ **元数据收集**: react_agent_meta 正常
-- ✅ **SQL 查询**: 数据提取正常
-- ✅ **错误处理**: 异常捕获完整
-
-## 🎯 测试项目
-
-### 基础功能
-- [ ] 健康检查端点
-- [ ] 普通问答
-- [ ] SQL 查询
-- [ ] 错误处理
-- [ ] 参数验证
-- [ ] 会话管理
-
-### 高级功能
-- [ ] 并发请求处理
-- [ ] 异步 Agent 调用
-- [ ] 元数据收集
-- [ ] 日志记录
-
-## 🔮 后续计划
-
-1. **性能优化**: 考虑使用 Gunicorn 等 WSGI 服务器
-2. **监控完善**: 添加更多监控指标
-3. **文档补充**: 根据使用情况补充文档
-4. **测试扩展**: 添加更多边界测试
-
----
-
-**迁移状态**: ✅ 完成  
-**兼容性**: ✅ 100% 兼容  
-**测试状态**: ✅ 通过  
-**文档状态**: ✅ 完善  
-
-**可以开始使用 Flask 版本的 Custom React Agent API!** 

+ 0 - 281
test/custom_react_agent/doc/QUICKSTART.md

@@ -1,281 +0,0 @@
-# Custom React Agent - 快速开始指南
-
-## 🚀 5分钟快速启动
-
-### 1. 启动API服务
-```bash
-cd test/custom_react_agent
-python api.py
-```
-
-服务将在 http://localhost:8000 启动
-
-### 2. 验证服务状态
-```bash
-curl http://localhost:8000/health
-```
-
-### 3. 开始对话
-```bash
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{"question": "请问哪个高速服务区的档口数量最多?", "user_id": "doudou"}'
-```
-
-### 4. 查看对话历史 ⭐ 新功能
-```bash
-# 查看用户的对话列表
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations?limit=5"
-
-# 查看特定对话的详细内容
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations/doudou:20250115103000001"
-```
-
-## 📋 基本API用法
-
-### 智能问答
-```bash
-# 普通对话
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{"question": "你好", "user_id": "alice"}'
-
-# SQL查询
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{"question": "查询收入最高的服务区", "user_id": "alice"}'
-
-# 继续对话 (使用相同thread_id)
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{"question": "详细说明一下", "user_id": "alice", "thread_id": "alice:20250115103000001"}'
-```
-
-### 对话历史管理 ⭐ 新功能
-```bash
-# 获取用户对话列表
-curl "http://localhost:8000/api/v0/react/users/alice/conversations"
-
-# 限制返回数量
-curl "http://localhost:8000/api/v0/react/users/alice/conversations?limit=10"
-
-# 获取特定对话详情
-curl "http://localhost:8000/api/v0/react/users/alice/conversations/alice:20250115103000001"
-```
-
-## 💻 Python 客户端示例
-
-### 基础对话
-```python
-import requests
-
-def chat_with_agent(question, user_id, thread_id=None):
-    url = "http://localhost:8000/api/chat"
-    payload = {
-        "question": question,
-        "user_id": user_id
-    }
-    if thread_id:
-        payload["thread_id"] = thread_id
-    
-    response = requests.post(url, json=payload)
-    return response.json()
-
-# 使用示例
-result = chat_with_agent("请问服务区数据查询", "alice")
-print(f"回答: {result['data']['response']}")
-```
-
-### 对话历史查询 ⭐ 新功能
-```python
-import requests
-
-def get_user_conversations(user_id, limit=10):
-    """获取用户对话列表"""
-    url = f"http://localhost:8000/api/v0/react/users/{user_id}/conversations"
-    params = {"limit": limit}
-    
-    response = requests.get(url, params=params)
-    return response.json()
-
-def get_conversation_detail(user_id, thread_id):
-    """获取对话详情"""
-    url = f"http://localhost:8000/api/v0/react/users/{user_id}/conversations/{thread_id}"
-    
-    response = requests.get(url)
-    return response.json()
-
-# 使用示例
-conversations = get_user_conversations("alice", limit=5)
-print(f"找到 {len(conversations['data']['conversations'])} 个对话")
-
-if conversations['data']['conversations']:
-    thread_id = conversations['data']['conversations'][0]['thread_id']
-    detail = get_conversation_detail("alice", thread_id)
-    print(f"对话包含 {detail['data']['message_count']} 条消息")
-```
-
-## 🌐 JavaScript/前端示例
-
-### 基础对话
-```javascript
-async function chatWithAgent(question, userId, threadId = null) {
-    const response = await fetch('http://localhost:8000/api/chat', {
-        method: 'POST',
-        headers: { 'Content-Type': 'application/json' },
-        body: JSON.stringify({
-            question: question,
-            user_id: userId,
-            ...(threadId && { thread_id: threadId })
-        })
-    });
-    
-    return await response.json();
-}
-
-// 使用示例
-const result = await chatWithAgent("查询服务区信息", "alice");
-console.log("回答:", result.data.response);
-```
-
-### 对话历史管理 ⭐ 新功能
-```javascript
-async function getUserConversations(userId, limit = 10) {
-    const response = await fetch(
-        `http://localhost:8000/api/v0/react/users/${userId}/conversations?limit=${limit}`
-    );
-    return await response.json();
-}
-
-async function getConversationDetail(userId, threadId) {
-    const response = await fetch(
-        `http://localhost:8000/api/v0/react/users/${userId}/conversations/${threadId}`
-    );
-    return await response.json();
-}
-
-// 使用示例
-const conversations = await getUserConversations("alice", 5);
-console.log(`找到 ${conversations.data.conversations.length} 个对话`);
-
-if (conversations.data.conversations.length > 0) {
-    const firstConv = conversations.data.conversations[0];
-    const detail = await getConversationDetail("alice", firstConv.thread_id);
-    console.log(`对话详情:`, detail.data);
-}
-```
-
-## 🧪 测试工具
-
-### 运行完整测试
-```bash
-cd test/custom_react_agent
-python test_api.py
-```
-
-### 测试新的对话历史功能 ⭐
-```bash
-cd test/custom_react_agent
-python test_conversation_api.py
-```
-
-### 单独测试问题
-```bash
-python test_api.py "查询服务区收入排名"
-```
-
-## 🎯 典型应用场景
-
-### 1. 聊天机器人界面
-```python
-# 获取用户的历史对话,显示对话列表
-conversations = get_user_conversations("user123", limit=20)
-
-for conv in conversations['data']['conversations']:
-    print(f"[{conv['formatted_time']}] {conv['conversation_preview']}")
-```
-
-### 2. 客服系统
-```python
-# 客服查看用户的完整对话历史
-user_id = "customer_456"
-conversations = get_user_conversations(user_id)
-
-for conv in conversations['data']['conversations']:
-    thread_id = conv['thread_id']
-    detail = get_conversation_detail(user_id, thread_id)
-    
-    print(f"对话时间: {conv['formatted_time']}")
-    print(f"消息数量: {detail['data']['message_count']}")
-    # 显示详细消息...
-```
-
-### 3. 对话分析
-```python
-# 分析用户的对话模式
-conversations = get_user_conversations("analyst_user")
-
-total_messages = sum(conv['message_count'] for conv in conversations['data']['conversations'])
-avg_messages = total_messages / len(conversations['data']['conversations'])
-
-print(f"平均每个对话 {avg_messages:.1f} 条消息")
-```
-
-## 🔧 Thread ID 设计说明
-
-### 格式规则
-- **格式**: `{user_id}:{timestamp}`
-- **示例**: `doudou:20250115103000001`
-- **优势**: 
-  - 自然包含用户信息
-  - 支持时间排序
-  - 无需额外映射表
-
-### 时间戳格式
-```
-20250115103000001
-│   │ │ │ │ │ │
-│   │ │ │ │ │ └── 毫秒 (001)
-│   │ │ │ │ └──── 秒 (00)
-│   │ │ │ └────── 分钟 (30)
-│   │ │ └──────── 小时 (10)
-│   │ └────────── 日 (15)
-│   └──────────── 月 (01)
-└──────────────── 年 (2025)
-```
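-
-下面是一个仅供说明的解析示意(假设 thread_id 形如 `{user_id}:{timestamp}`,且时间戳前 14 位为年月日时分秒;`parse_thread_id` 是演示用的自拟函数,并非项目 API):
-
-```python
-def parse_thread_id(thread_id: str) -> dict:
-    """把 {user_id}:{timestamp} 形式的 thread_id 拆成用户与可读时间(纯演示)"""
-    user_id, _, ts = thread_id.partition(":")
-    formatted = ts
-    if len(ts) >= 14:  # 前14位为 YYYYMMDDHHMMSS,其后为毫秒序列
-        formatted = f"{ts[:4]}-{ts[4:6]}-{ts[6:8]} {ts[8:10]}:{ts[10:12]}:{ts[12:14]}"
-    return {"user_id": user_id, "timestamp": ts, "formatted_time": formatted}
-
-print(parse_thread_id("doudou:20250115103000001"))
-# {'user_id': 'doudou', 'timestamp': '20250115103000001', 'formatted_time': '2025-01-15 10:30:00'}
-```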
-
-## ⚠️ 注意事项
-
-1. **服务依赖**: 确保Redis服务可用
-2. **数据库连接**: 确认业务数据库连接正常
-3. **并发限制**: API有并发和频率限制
-4. **数据安全**: 生产环境需要添加认证授权
-5. **监控日志**: 注意观察API日志和性能指标
-
-## 🔍 故障排除
-
-### 常见问题
-```bash
-# 检查服务状态
-curl http://localhost:8000/health
-
-# 查看详细日志
-python api.py  # 查看启动日志
-
-# 测试基础功能
-python test_api.py "你好"
-
-# 测试新功能
-python test_conversation_api.py
-```
-
-### 性能优化
-- 对话列表查询使用Redis SCAN,支持大量数据
-- 合理设置limit参数避免过大响应
-- 生产环境建议添加缓存层
-
----
-
-🎉 现在你已经掌握了Custom React Agent API的基本用法和新的对话历史管理功能!
-
-📚 更多详细信息请参考: [完整API文档](./README_API.md) 

+ 0 - 205
test/custom_react_agent/doc/README_API.md

@@ -1,205 +0,0 @@
-# Custom React Agent API 文档
-
-Flask API服务,提供与Custom React Agent进行交互的RESTful接口。
-
-## 🚀 快速开始
-
-### 启动服务
-```bash
-cd test/custom_react_agent
-python api.py
-```
-
-服务将在 http://localhost:8000 启动
-
-## 📋 API 端点
-
-### 1. 健康检查
-**GET** `/health`
-
-检查API服务状态
-
-**响应示例:**
-```json
-{
-  "status": "healthy",
-  "agent_initialized": true,
-  "timestamp": "2025-01-15T10:30:00"
-}
-```
-
-### 2. 聊天接口
-**POST** `/api/chat`
-
-与Agent进行对话
-
-**请求参数:**
-```json
-{
-  "question": "请问哪个高速服务区的档口数量最多?",
-  "user_id": "doudou",
-  "thread_id": "doudou:20250115103000001"  // 可选,不提供则自动生成
-}
-```
-
-**响应示例:**
-```json
-{
-  "success": true,
-  "data": {
-    "records": {...},      // SQL查询结果
-    "response": "...",     // Agent回答
-    "sql": "...",         // 执行的SQL
-    "react_agent_meta": {...}
-  },
-  "thread_id": "doudou:20250115103000001",
-  "timestamp": "2025-01-15T10:30:00"
-}
-```
-
-### 3. 获取用户对话列表 ⭐ 新增
-**GET** `/api/v0/react/users/{user_id}/conversations`
-
-获取指定用户的最近聊天记录列表
-
-**路径参数:**
-- `user_id`: 用户ID
-
-**查询参数:**
-- `limit`: 返回数量限制 (默认10,最大50)
-
-**请求示例:**
-```bash
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations?limit=5"
-```
-
-**响应示例:**
-```json
-{
-  "success": true,
-  "data": {
-    "user_id": "doudou",
-    "conversations": [
-      {
-        "thread_id": "doudou:20250115103000001",
-        "user_id": "doudou",
-        "timestamp": "20250115103000001",
-        "message_count": 4,
-        "last_message": "南城服务区的档口数量最多,共有39个档口。",
-        "last_updated": "2025-01-15T10:30:00",
-        "conversation_preview": "请问哪个高速服务区的档口数量最多?",
-        "formatted_time": "2025-01-15 10:30:00"
-      },
-      {
-        "thread_id": "doudou:20250115102500002", 
-        "user_id": "doudou",
-        "timestamp": "20250115102500002",
-        "message_count": 6,
-        "last_message": "共有6个餐饮档口。",
-        "last_updated": "2025-01-15T10:25:00",
-        "conversation_preview": "南城服务区有多少个餐饮档口?",
-        "formatted_time": "2025-01-15 10:25:00"
-      }
-    ],
-    "total_count": 2,
-    "limit": 5
-  },
-  "timestamp": "2025-01-15T10:35:00"
-}
-```
-
-### 4. 获取对话详情 ⭐ 新增
-**GET** `/api/v0/react/users/{user_id}/conversations/{thread_id}`
-
-获取特定对话的详细历史记录
-
-**路径参数:**
-- `user_id`: 用户ID
-- `thread_id`: 对话线程ID (必须以 `user_id:` 开头)
-
-**请求示例:**
-```bash
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations/doudou:20250115103000001"
-```
-
-**响应示例:**
-```json
-{
-  "success": true,
-  "data": {
-    "user_id": "doudou",
-    "thread_id": "doudou:20250115103000001",
-    "message_count": 4,
-    "messages": [
-      {
-        "type": "human",
-        "content": "请问哪个高速服务区的档口数量最多?",
-        "tool_calls": null
-      },
-      {
-        "type": "ai", 
-        "content": "我来帮您查询一下高速服务区的档口数量信息。",
-        "tool_calls": [...]
-      },
-      {
-        "type": "tool",
-        "content": "[{\"service_area\": \"南城服务区\", \"booth_count\": 39}, ...]",
-        "tool_calls": null
-      },
-      {
-        "type": "ai",
-        "content": "南城服务区的档口数量最多,共有39个档口。",
-        "tool_calls": null
-      }
-    ]
-  },
-  "timestamp": "2025-01-15T10:35:00"
-}
-```
-
-## 🔧 技术特性
-
-### Thread ID 设计
-- 格式:`{user_id}:{timestamp}`
-- 示例:`doudou:20250115103000001`
-- 自动按时间戳排序
-- 无需额外映射表
-
-### 数据持久化
-- 使用 AsyncRedisSaver 存储对话状态
-- 支持跨会话的对话历史查询
-- Redis pattern匹配高效查询用户数据
-
-### 错误处理
-- 统一的JSON错误格式
-- 详细的错误日志
-- 优雅的异常处理
-
-## 📊 使用场景
-
-1. **聊天机器人界面**: 显示用户的历史对话列表
-2. **对话管理**: 查看和管理特定对话的详细内容
-3. **数据分析**: 分析用户的对话模式和频率
-4. **客服系统**: 客服人员查看用户历史对话记录
-
-## 🔍 测试示例
-
-```bash
-# 1. 发起对话
-curl -X POST http://localhost:8000/api/chat \
-  -H "Content-Type: application/json" \
-  -d '{"question": "请问哪个高速服务区的档口数量最多?", "user_id": "doudou"}'
-
-# 2. 查看对话列表  
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations?limit=5"
-
-# 3. 查看特定对话详情
-curl "http://localhost:8000/api/v0/react/users/doudou/conversations/doudou:20250115103000001"
-```
-
-## 📝 注意事项
-
-- user_id 和 thread_id 的格式验证
-- limit 参数范围限制 (1-50)
-- 异步操作的错误处理
-- Redis连接的健壮性 

+ 0 - 31
test/custom_react_agent/doc/README_valid_sql_test.md

@@ -1,31 +0,0 @@
-# valid_sql 测试说明
-
-## 概述
-
-简化版测试脚本,专门测试 `valid_sql` 工具的三种错误场景:
-
-1. **表不存在** - `SELECT * FROM non_existent_table LIMIT 1`
-2. **字段不存在** - `SELECT non_existent_field FROM bss_business_day_data LIMIT 1`  
-3. **语法错误** - `SELECT * FROM bss_business_day_data WHERE`
-
-## 使用方法
-
-```bash
-# 激活虚拟环境
-.\.venv\Scripts\Activate.ps1
-
-# 运行测试
-python test_valid_sql_simple.py
-```
-
-## 测试内容
-
-脚本会依次测试三种错误场景:
-
-1. **直接测试 valid_sql 工具** - 验证工具是否正确识别错误
-2. **测试 LLM 响应** - 观察 LLM 收到错误后如何处理
-
-## 预期结果
-
-- `valid_sql` 工具应该正确识别并报告错误
-- LLM 应该理解错误原因并提供有意义的响应 

+ 0 - 296
test/custom_react_agent/doc/agent.py.backup

@@ -1,296 +0,0 @@
-"""
-基于 StateGraph 的、具备上下文感知能力的 React Agent 核心实现
-"""
-import logging
-import json
-import pandas as pd
-from typing import List, Optional, Dict, Any, Tuple
-from contextlib import AsyncExitStack
-
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage, ToolMessage, BaseMessage, SystemMessage
-from langgraph.graph import StateGraph, END
-from langgraph.prebuilt import ToolNode
-from redis.asyncio import Redis
-try:
-    from langgraph.checkpoint.redis import AsyncRedisSaver
-except ImportError:
-    AsyncRedisSaver = None
-
-# 从新模块导入配置、状态和工具
-from . import config
-from .state import AgentState
-from .sql_tools import sql_tools
-from langchain_core.runnables import RunnablePassthrough
-
-logger = logging.getLogger(__name__)
-
-class CustomReactAgent:
-    """
-    一个使用 StateGraph 构建的、具备上下文感知和持久化能力的 Agent。
-    """
-    def __init__(self):
-        """私有构造函数,请使用 create() 类方法来创建实例。"""
-        self.llm = None
-        self.tools = None
-        self.agent_executor = None
-        self.checkpointer = None
-        self._exit_stack = None
-
-    @classmethod
-    async def create(cls):
-        """异步工厂方法,创建并初始化 CustomReactAgent 实例。"""
-        instance = cls()
-        await instance._async_init()
-        return instance
-
-    async def _async_init(self):
-        """异步初始化所有组件。"""
-        logger.info("🚀 开始初始化 CustomReactAgent...")
-
-        # 1. 初始化 LLM
-        self.llm = ChatOpenAI(
-            api_key=config.QWEN_API_KEY,
-            base_url=config.QWEN_BASE_URL,
-            model=config.QWEN_MODEL,
-            temperature=0.1,
-            model_kwargs={
-                "extra_body": {
-                    "enable_thinking": False,
-                    "misc": {
-                        "ensure_ascii": False
-                    }
-                }
-            }
-        )
-        logger.info(f"   LLM 已初始化,模型: {config.QWEN_MODEL}")
-
-        # 2. 绑定工具
-        self.tools = sql_tools
-        self.llm_with_tools = self.llm.bind_tools(self.tools)
-        logger.info(f"   已绑定 {len(self.tools)} 个工具。")
-
-        # 3. 初始化 Redis Checkpointer
-        if config.REDIS_ENABLED and AsyncRedisSaver is not None:
-            try:
-                self._exit_stack = AsyncExitStack()
-                checkpointer_manager = AsyncRedisSaver.from_conn_string(config.REDIS_URL)
-                self.checkpointer = await self._exit_stack.enter_async_context(checkpointer_manager)
-                await self.checkpointer.asetup()
-                logger.info(f"   AsyncRedisSaver 持久化已启用: {config.REDIS_URL}")
-            except Exception as e:
-                logger.error(f"   ❌ RedisSaver 初始化失败: {e}", exc_info=True)
-                if self._exit_stack:
-                    await self._exit_stack.aclose()
-                self.checkpointer = None
-        else:
-            logger.warning("   Redis 持久化功能已禁用。")
-
-        # 4. 构建 StateGraph
-        self.agent_executor = self._create_graph()
-        logger.info("   StateGraph 已构建并编译。")
-        logger.info("✅ CustomReactAgent 初始化完成。")
-
-    async def close(self):
-        """清理资源,关闭 Redis 连接。"""
-        if self._exit_stack:
-            await self._exit_stack.aclose()
-            self._exit_stack = None
-            self.checkpointer = None
-            logger.info("✅ RedisSaver 资源已通过 AsyncExitStack 释放。")
-
-    def _create_graph(self):
-        """定义并编译最终的、正确的 StateGraph 结构。"""
-        builder = StateGraph(AgentState)
-
-        # 定义所有需要的节点
-        builder.add_node("agent", self._agent_node)
-        builder.add_node("prepare_tool_input", self._prepare_tool_input_node)
-        builder.add_node("tools", ToolNode(self.tools))
-        builder.add_node("update_state_after_tool", self._update_state_after_tool_node)
-        builder.add_node("format_final_response", self._format_final_response_node)
-
-        # 建立正确的边连接
-        builder.set_entry_point("agent")
-        builder.add_conditional_edges(
-            "agent",
-            self._should_continue,
-            {
-                "continue": "prepare_tool_input",
-                "end": "format_final_response"
-            }
-        )
-        builder.add_edge("prepare_tool_input", "tools")
-        builder.add_edge("tools", "update_state_after_tool")
-        builder.add_edge("update_state_after_tool", "agent")
-        builder.add_edge("format_final_response", END)
-
-        return builder.compile(checkpointer=self.checkpointer)
-
-    def _should_continue(self, state: AgentState) -> str:
-        """判断是继续调用工具还是结束。"""
-        last_message = state["messages"][-1]
-        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
-            return "continue"
-        return "end"
-
-    def _agent_node(self, state: AgentState) -> Dict[str, Any]:
-        """Agent 节点:只负责调用 LLM 并返回其输出。"""
-        logger.info(f"🧠 [Node] agent - Thread: {state['thread_id']}")
-        
-        messages_for_llm = list(state["messages"])
-        if state.get("suggested_next_step"):
-            instruction = f"提示:建议下一步使用工具 '{state['suggested_next_step']}'。"
-            messages_for_llm.append(SystemMessage(content=instruction))
-
-        response = self.llm_with_tools.invoke(messages_for_llm)
-        logger.info(f"   LLM Response: {response.pretty_print()}")
-        
-        # 只返回消息,不承担其他职责
-        return {"messages": [response]}
-    
-    def _prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-        """
-        信息组装节点:为需要上下文的工具注入历史消息。
-        """
-        logger.info(f"🛠️ [Node] prepare_tool_input - Thread: {state['thread_id']}")
-        
-        last_message = state["messages"][-1]
-        if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
-            return {"messages": [last_message]}
-
-        # 创建一个新的 AIMessage 来替换,避免直接修改 state 中的对象
-        new_tool_calls = []
-        for tool_call in last_message.tool_calls:
-            if tool_call["name"] == "generate_sql":
-                logger.info("   检测到 generate_sql 调用,注入历史消息。")
-                # 复制一份以避免修改原始 tool_call
-                modified_args = tool_call["args"].copy()
-                
-                # 将消息对象列表转换为可序列化的字典列表
-                serializable_history = []
-                for msg in state["messages"]:
-                    serializable_history.append({
-                        "type": msg.type,
-                        "content": msg.content
-                    })
-                
-                modified_args["history_messages"] = serializable_history
-                logger.info(f"   注入了 {len(serializable_history)} 条历史消息")
-                
-                new_tool_calls.append({
-                    "name": tool_call["name"],
-                    "args": modified_args,
-                    "id": tool_call["id"],
-                })
-            else:
-                new_tool_calls.append(tool_call)
-        
-        # 用包含修改后参数的新消息替换掉原来的
-        last_message.tool_calls = new_tool_calls
-        return {"messages": [last_message]}
-
-    def _update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-        """在工具执行后,更新 suggested_next_step 并清理参数。"""
-        logger.info(f"📝 [Node] update_state_after_tool - Thread: {state['thread_id']}")
-        
-        last_tool_message = state['messages'][-1]
-        tool_name = last_tool_message.name
-        tool_output = last_tool_message.content
-        next_step = None
-
-        if tool_name == 'generate_sql':
-            if "失败" in tool_output or "无法生成" in tool_output:
-                next_step = 'answer_with_common_sense'
-            else:
-                next_step = 'valid_sql'
-            
-            # 🎯 清理 generate_sql 的 history_messages 参数,设置为空字符串
-            self._clear_history_messages_parameter(state['messages'])
-        
-        elif tool_name == 'valid_sql':
-            if "失败" in tool_output:
-                next_step = 'analyze_validation_error'
-            else:
-                next_step = 'run_sql'
-
-        elif tool_name == 'run_sql':
-            next_step = 'summarize_final_answer'
-            
-        logger.info(f"   Tool '{tool_name}' executed. Suggested next step: {next_step}")
-        return {"suggested_next_step": next_step}
-
-    def _clear_history_messages_parameter(self, messages: List[BaseMessage]) -> None:
-        """
-        将 generate_sql 工具的 history_messages 参数设置为空字符串
-        """
-        for message in messages:
-            if hasattr(message, "tool_calls") and message.tool_calls:
-                for tool_call in message.tool_calls:
-                    if tool_call["name"] == "generate_sql" and "history_messages" in tool_call["args"]:
-                        tool_call["args"]["history_messages"] = ""
-                        logger.info(f"   已将 generate_sql 的 history_messages 设置为空字符串")
-
-    def _format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-        """最终输出格式化节点。"""
-        logger.info(f"🎨 [Node] format_final_response - Thread: {state['thread_id']}")
-        last_message = state['messages'][-1]
-        last_message.content = f"[Formatted Output]\n{last_message.content}"
-        return {"messages": [last_message]}
-
-    async def chat(self, message: str, user_id: str, thread_id: Optional[str] = None) -> Dict[str, Any]:
-        """
-        处理用户聊天请求。
-        """
-        if not thread_id:
-            thread_id = f"{user_id}:{pd.Timestamp.now().strftime('%Y%m%d%H%M%S%f')}"
-            logger.info(f"🆕 新建会话,Thread ID: {thread_id}")
-        
-        config = {
-            "configurable": {
-                "thread_id": thread_id,
-            }
-        }
-        
-        inputs = {
-            "messages": [HumanMessage(content=message)],
-            "user_id": user_id,
-            "thread_id": thread_id,
-            "suggested_next_step": None,
-        }
-
-        try:
-            final_state = await self.agent_executor.ainvoke(inputs, config)
-            answer = final_state["messages"][-1].content
-            logger.info(f"✅ 处理完成 - Final Answer: '{answer}'")
-            return {"success": True, "answer": answer, "thread_id": thread_id}
-        except Exception as e:
-            logger.error(f"❌ 处理过程中发生严重错误 - Thread: {thread_id}: {e}", exc_info=True)
-            return {"success": False, "error": str(e), "thread_id": thread_id}
-    
-    async def get_conversation_history(self, thread_id: str) -> List[Dict[str, Any]]:
-        """从 checkpointer 获取指定线程的对话历史。"""
-        if not self.checkpointer:
-            return []
-        
-        config = {"configurable": {"thread_id": thread_id}}
-        conversation_state = await self.checkpointer.get(config)
-        
-        if not conversation_state:
-            return []
-            
-        history = []
-        for msg in conversation_state['values'].get('messages', []):
-            if isinstance(msg, HumanMessage):
-                role = "human"
-            elif isinstance(msg, ToolMessage):
-                role = "tool"
-            else: # AIMessage
-                role = "ai"
-            
-            history.append({
-                "type": role,
-                "content": msg.content,
-                "tool_calls": getattr(msg, 'tool_calls', None)
-            })
-        return history 

+ 0 - 345
test/custom_react_agent/doc/api_design.md

@@ -1,345 +0,0 @@
-# Custom React Agent API 概要设计
-
-## 1. 项目概述
-
-基于 `./test/custom_react_agent` 模块开发一个RESTful API,提供智能问答服务。用户通过POST请求提交问题,系统通过LangGraph Agent处理并返回格式化的JSON结果。
-
-## 2. API设计
-
-### 2.1 接口定义
-
-**端点**: `POST /api/chat`
-
-**请求格式**:
-```json
-{
-    "question": "请问中国共有多少个充电桩",
-    "user_id": "Paul",      // 可选,默认为"guest"
-    "thread_id": "xxxx"     // 可选,不传则自动生成新会话
-}
-```
-
-**响应格式**:
-```json
-{
-    "code": 200,
-    "message": "操作成功",
-    "success": true,
-    "data": {
-        // 核心响应内容
-        "response": "根据查询结果,当前数据库中共有3个服务区的收入数据...",
-        "sql": "SELECT COUNT(*) FROM charging_stations;",  // 可选,仅当执行SQL时存在
-        "records": {  // 可选,仅当有查询结果时存在
-            "columns": ["服务区名称", "总收入"],
-            "rows": [
-                {"服务区名称": "庐山服务区", "总收入": "7024226.1500"},
-                {"服务区名称": "三清山服务区", "总收入": "6929288.3300"}
-            ],
-            "total_row_count": 3,
-            "is_limited": false
-        },
-        
-        // Agent元数据
-        "react_agent_meta": {
-            "thread_id": "Paul:20250101120030001",
-            "conversation_rounds": 5,
-            "tools_used": ["generate_sql", "run_sql"],
-            "execution_path": ["agent", "prepare_tool_input", "tools", "format_final_response"],
-            "total_messages": 11,
-            "sql_execution_count": 1,
-            "context_injected": true,
-            "agent_version": "custom_react_v1"
-        },
-        
-        "timestamp": "2025-01-01T12:00:30.123456"
-    }
-}
-```
-
-**错误响应格式**:
-```json
-{
-    "code": 500,
-    "message": "SQL执行失败",
-    "success": false,
-    "error": "详细错误信息",
-    "data": {
-        "react_agent_meta": {
-            "thread_id": "Paul:20250101120030001",
-            "execution_path": ["agent", "prepare_tool_input", "tools", "error"],
-            "agent_version": "custom_react_v1"
-        },
-        "timestamp": "2025-01-01T12:00:30.123456"
-    }
-}
-```
-
-### 2.2 状态码定义
-
-| Code | 描述 | 场景 |
-|------|------|------|
-| 200  | 成功 | 正常处理完成 |
-| 400  | 请求错误 | 参数缺失或格式错误 |
-| 500  | 服务器错误 | Agent执行异常 |
-
-## 3. 架构设计
-
-### 3.1 分层处理架构
-
-```
-用户请求 → API层 → Agent处理 → format_final_response节点 → API层包装 → JSON响应
-           ↓        ↓              ↓                      ↓
-        参数验证   核心逻辑      生成data内容           包装HTTP格式
-```
-
-### 3.2 职责分工
-
-#### **API层 (api.py)**
-- 请求参数验证和预处理
-- HTTP响应格式包装 (code, message, success)
-- 错误处理和异常捕获
-- 时间戳添加
-- Thread ID管理
-
-#### **format_final_response节点**
-- 从Agent State提取核心数据
-- 生成response、sql、records字段
-- 收集和整理react_agent_meta元数据
-- 输出标准化的data结构
-
-#### **chat()函数**
-- 保持简化格式,专注于对接shell.py
-- 不参与API响应格式化
-- 保留现有的测试功能
-
-### 3.3 数据流转
-
-```mermaid
-graph TD
-    A[用户POST请求] --> B[API层参数验证]
-    B --> C["调用Agent.chat()"]
-    C --> D[Agent执行StateGraph]
-    D --> E[format_final_response节点]
-    E --> F[生成结构化data]
-    F --> G[返回到API层]
-    G --> H[包装HTTP响应格式]
-    H --> I[返回JSON响应]
-```
-
-## 4. Thread ID管理策略
-
-### 4.1 生成规则
-- **格式**: `{user_id}:{timestamp_with_milliseconds}`
-- **示例**: `Paul:20250101120030001`
-- **默认用户**: 未传递user_id时使用`guest`
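-
-生成逻辑的极简示意(此处截取到毫秒以对齐文档中的 17 位示例;agent.py 实际使用 `%f` 微秒精度,两者仅精度不同):
-
-```python
-from datetime import datetime
-
-def new_thread_id(user_id: str = "guest") -> str:
-    # {user_id}:{timestamp},时间戳为 年月日时分秒 + 毫秒,共 17 位
-    ts = datetime.now().strftime("%Y%m%d%H%M%S%f")[:-3]
-    return f"{user_id}:{ts}"
-
-print(new_thread_id("Paul"))   # 形如 Paul:20250101120030001
-```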
-
-### 4.2 会话管理
-```python
-# 新会话:不传thread_id
-{"question": "你好", "user_id": "Paul"}
-
-# 继续会话:传递thread_id
-{"question": "详细解释", "user_id": "Paul", "thread_id": "Paul:20250101120030001"}
-
-# 重新开始:不传thread_id
-{"question": "新问题", "user_id": "Paul"}
-```
-
-### 4.3 前端集成建议
-```javascript
-class ChatSession {
-    constructor(userId = 'guest') {
-        this.userId = userId;
-        this.threadId = null;
-    }
-    
-    // 发送消息
-    async sendMessage(question) {
-        const payload = {
-            question,
-            user_id: this.userId
-        };
-        
-        // 继续会话
-        if (this.threadId) {
-            payload.thread_id = this.threadId;
-        }
-        
-        const response = await fetch('/api/chat', {
-            method: 'POST',
-            headers: {'Content-Type': 'application/json'},
-            body: JSON.stringify(payload)
-        });
-        
-        const result = await response.json();
-        
-        // 保存thread_id用于后续对话
-        if (result.success) {
-            this.threadId = result.data.react_agent_meta.thread_id;
-        }
-        
-        return result;
-    }
-    
-    // 开始新会话
-    startNewSession() {
-        this.threadId = null;
-    }
-}
-```
-
-## 5. 实现计划
-
-### 5.1 新增文件
-
-#### **api.py**
-```python
-"""
-Custom React Agent API 服务
-提供RESTful接口用于智能问答
-"""
-from flask import Flask, request, jsonify
-from flask_cors import CORS
-from typing import Optional, Dict, Any
-import asyncio
-from datetime import datetime
-
-def validate_request_data(data: Dict[str, Any]) -> Dict[str, Any]:
-    """验证请求数据"""
-    errors = []
-    
-    question = data.get('question', '')
-    if not question or not question.strip():
-        errors.append('问题不能为空')
-    elif len(question) > 2000:
-        errors.append('问题长度不能超过2000字符')
-    
-    if errors:
-        raise ValueError('; '.join(errors))
-    
-    return {
-        'question': question.strip(),
-        'user_id': data.get('user_id', 'guest'),
-        'thread_id': data.get('thread_id')
-    }
-
-app = Flask(__name__)
-CORS(app)
-
-@app.route("/api/chat", methods=["POST"])
-def chat_endpoint():
-    """智能问答接口"""
-    data = request.get_json()
-    validated_data = validate_request_data(data)
-    # 实现逻辑...
-    return jsonify({"code": 200, "success": True, "data": result})
-```
-
-### 5.2 修改现有文件
-
-#### **agent.py**
-- 修改`_format_final_response_node`方法
-- 增强数据提取和元数据收集逻辑
-- 保持`chat()`函数的简化格式
-
-#### **state.py** 
-- 如果需要,可添加额外的状态字段用于元数据收集
-
-### 5.3 开发步骤
-
-1. **第一阶段:核心功能**
-   - 实现API基础框架
-   - 修改format_final_response节点
-   - 实现基本的请求/响应处理
-
-2. **第二阶段:增强功能**
-   - 完善元数据收集
-   - 实现错误处理机制
-   - 添加参数验证
-
-3. **第三阶段:测试优化**
-   - API测试和调试
-   - 性能优化
-   - 文档完善
-
-## 6. 数据格式详细说明
-
-### 6.1 核心字段
-
-| 字段 | 类型 | 必需 | 说明 |
-|------|------|------|------|
-| response | string | 是 | LLM的回答或SQL结果总结 |
-| sql | string | 否 | 执行的SQL语句,仅在数据库查询时存在 |
-| records | object | 否 | 查询结果数据,仅在有结果时存在 |
-
-### 6.2 records字段结构
-```json
-{
-    "columns": ["列名1", "列名2"],           // 列名数组
-    "rows": [                              // 数据行数组
-        {"列名1": "值1", "列名2": "值2"}
-    ],
-    "total_row_count": 100,                // 总行数
-    "is_limited": false                    // 是否被截断
-}
-```
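-
-组装该结构的一个极简示意(纯演示代码,并非项目内的实际实现;假设查询结果已经是 pandas DataFrame):
-
-```python
-import pandas as pd
-
-def to_records(df: pd.DataFrame, limit: int = 100) -> dict:
-    limited = df.head(limit)                       # 只取前 limit 行
-    return {
-        "columns": list(limited.columns),
-        "rows": limited.to_dict(orient="records"),
-        "total_row_count": len(df),
-        "is_limited": len(df) > limit,
-    }
-```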
-
-### 6.3 react_agent_meta字段
-```json
-{
-    "thread_id": "用户会话ID",
-    "conversation_rounds": 5,              // 当前对话轮次
-    "tools_used": ["工具名称"],           // 本次使用的工具
-    "execution_path": ["节点路径"],       // 执行路径
-    "total_messages": 11,                 // 消息总数
-    "sql_execution_count": 1,             // SQL执行次数
-    "context_injected": true,             // 是否注入上下文
-    "agent_version": "custom_react_v1"    // Agent版本
-}
-```
-
-## 7. 兼容性考虑
-
-### 7.1 shell.py适配
-- 保持`chat()`函数的简化返回格式
-- shell.py继续使用原有的交互逻辑
-- 新的API格式不影响命令行测试
-
-### 7.2 现有功能保留
-- 保持所有现有的Agent功能
-- Redis持久化功能继续工作
-- 工具调用机制不变
-
-## 8. 扩展性设计
-
-### 8.1 版本控制
-- API版本通过URL路径区分: `/api/v1/chat`
-- Agent版本通过react_agent_meta.agent_version标识
-
-### 8.2 配置化
-- 支持通过配置文件调整返回字段
-- 支持自定义元数据收集策略
-
-### 8.3 监控和日志
-- 请求/响应日志记录
-- 性能指标收集
-- 错误统计和告警
-
-## 9. 安全考虑
-
-### 9.1 输入验证
-- 问题长度限制
-- user_id格式验证
-- SQL注入防护
-
-### 9.2 资源保护
-- 请求频率限制
-- 超时控制
-- 内存使用监控
-
----
-
-**文档版本**: v1.0  
-**创建时间**: 2025-01-01  
-**作者**: AI Assistant  
-**适用范围**: test/custom_react_agent 模块 

+ 0 - 109
test/custom_react_agent/doc/community_help_request.md

@@ -1,109 +0,0 @@
-# 紧急求助:LangGraph 中向工具传递包含中文的复杂参数时,遭遇双重JSON编码和序列化错误
-
-大家好,
-
-我们正在使用 LangGraph 构建一个基于 `StateGraph` 的 ReAct Agent,其核心需求是在多轮对话中能够理解并利用上下文生成 SQL 查询。为了实现这一点,我们设计了一个流程,需要将完整的对话历史(`history_messages`)作为参数,传递给我们自定义的 `generate_sql` 工具。
-
-然而,当对话历史中包含中文字符时,我们在 LangChain 的工具调用序列化阶段遇到了一个棘手的 `JSONDecodeError`,似乎是由于双重 JSON 编码或不当的 `\uXXXX` 转义引起的。我们已经尝试了多种方案,但都未能解决。恳请社区的专家们不吝赐教!
-
----
-
-## 1. 核心目标与架构
-
-我们的目标是让 `generate_sql` 工具能够访问完整的对话历史,以便在处理“这个服务区怎么样?”这类指代性问题时,能够知道“这个服务区”具体指代的是哪一个。
-
-我们的 `StateGraph` 设计如下:
-
-```mermaid
-graph TD
-    A[START] --> B(agent_node);
-    B --> C{有工具调用?};
-    C -- 是 --> D(prepare_tool_input_node);
-    C -- 否 --> G[END];
-    D --> E(tool_node);
-    E --> F(update_state_after_tool_node);
-    F --> B;
-```
-
-- **`prepare_tool_input_node`**: 这个节点的关键职责是,当检测到 `agent_node` 决定调用 `generate_sql` 时,从 `state` 中提取完整的 `messages` 列表,并将其作为 `history_messages` 参数注入到该工具调用的 `args` 中。
-
-## 2. 遇到的核心问题
-
-当 `prepare_tool_input_node` 成功将 `history_messages`(一个包含 `{'type': 'human', 'content': '你好'}` 这样字典的列表)注入后,图在继续执行时崩溃。
-
-**错误日志如下:**
-
-```
-Invalid Tool Calls:
-  generate_sql (call_e58f408879664da99cd18d)
- Call ID: call_e58f408879664da99cd18d
-  Error: Function generate_sql arguments:
-
-{"question": "\u8bf7\u95ee\u8fd9\u4e2a\u9ad8\u901f...ff1f", "history_messages": [{"type": "human", "content": "\u8bf7\u95ee\u7cfb\u7edf..."}, ...]}
-
-are not valid JSON. Received JSONDecodeError Invalid \escape: line 1 column 1539 (char 1538)
-For troubleshooting, visit: https://python.langchain.com/docs/troubleshooting/errors/OUTPUT_PARSING_FAILURE
-```
-
-## 3. 我们的问题分析
-
-从日志可以看出,传递给 `generate_sql` 的 `args` 字典,在被序列化时,所有的中文字符都被转换成了 `\uXXXX` 的 ASCII 编码格式。
-
-我们推断,问题根源在于 LangChain 在准备将 `tool_calls` 发送给 LLM API 或进行内部处理时,**强制使用 `json.dumps(..., ensure_ascii=True)` 对 `args` 字典进行了序列化**。当这个已经被编码的、包含大量 `\` 转义符的字符串在后续流程中被再次当作 JSON 解析时,便会因为非法的 `\u` 转义序列而导致 `JSONDecodeError`。这似乎是一种我们无法轻易配置的“双重编码”问题。
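-
-下面这段只演示标准库 `json` 的 `ensure_ascii` 行为,用来说明日志中 `\uXXXX` 转义的直接来源(不涉及 LangChain 内部实现):
-
-```python
-import json
-
-args = {"question": "请问这个高速服务区怎么样?",
-        "history_messages": [{"type": "human", "content": "你好"}]}
-
-print(json.dumps(args))                       # 默认 ensure_ascii=True,中文被转成 \uXXXX
-print(json.dumps(args, ensure_ascii=False))   # 中文原样输出
-```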
-
-## 4. 已尝试的失败方案
-
-1.  **在 `ChatOpenAI` 初始化时设置 `ensure_ascii=False`**:
-    - 我们尝试通过 `model_kwargs={"extra_body": {"misc": {"ensure_ascii": False}}}` 来影响序列化行为。
-    - **结果**: 这只影响了从 LLM 返回的最终答案的渲染,但**未能改变 LangChain 对工具调用参数的序列化行为**,错误依旧。
-
-2.  **在工具端解码**:
-    - 我们尝试在 `generate_sql` 函数内部,对接收到的 `history_messages` 字符串进行 `json.loads()` 或其他形式的解码。
-    - **结果**: 失败。因为错误发生在 LangChain 调用我们工具**之前**的序列化阶段,程序流程根本没有机会进入到我们的工具函数内部。
-
-## 5. 寻求帮助的核心问题
-
-我们感觉陷入了一个两难的境地。为了实现上下文感知,我们必须向工具传递复杂的数据;但 LangChain 的序列化机制似乎不允许这样做,尤其是当数据包含非 ASCII 字符时。
-
-我们想请教社区:
-
-1.  **在 LangGraph 中,向工具传递包含非 ASCII 字符的复杂数据结构(如对象或字典列表)的最佳实践到底是什么?**
-2.  **是否有任何方法可以覆盖或配置 `ToolNode` 或其底层对 `tool_calls` `args` 的序列化行为,强制其使用 `ensure_ascii=False`?**
-3.  如果这条路走不通,是否有其他更优雅、更推荐的设计模式,来解决“需要感知完整对话历史的工具”这一常见的场景?(例如,除了我们正在尝试的“参数注入”模式外,还有没有其他的可能性?)
-
----
-
-### 附:关键代码片段
-
-**`_prepare_tool_input_node`**:
-```python
-def _prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-    last_message = state["messages"][-1]
-    if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
-        return {}
-    
-    new_tool_calls = []
-    for tool_call in last_message.tool_calls:
-        if tool_call["name"] == "generate_sql":
-            # 将消息对象列表转换为可序列化的字典列表
-            serializable_history = [
-                {"type": msg.type, "content": msg.content} 
-                for msg in state["messages"]
-            ]
-            
-            modified_args = tool_call["args"].copy()
-            modified_args["history_messages"] = serializable_history
-            new_tool_calls.append({      # 重新构建 tool_call
-                "name": tool_call["name"],
-                "args": modified_args,
-                "id": tool_call["id"],
-            })
-    
-    last_message.tool_calls = new_tool_calls
-    return {"messages": [last_message]}
-```
-
-**`generate_sql` 工具签名**:
-```python
-@tool
-def generate_sql(question: str, history_messages: List[Dict[str, Any]]) -> str:
-    # ...
-```
-
-任何建议或思路都将对我们产生巨大的帮助。提前感谢大家! 

+ 0 - 1
test/custom_react_agent/doc/network_troubleshooting.md

@@ -1 +0,0 @@
- 

+ 0 - 112
test/custom_react_agent/doc/redesign_summary.md

@@ -1,112 +0,0 @@
-# Custom React Agent 重构概要设计
-
-本文档总结了将原有基于 `create_react_agent` 的 Demo 重构为使用 `StateGraph` 的、具备强大上下文处理能力和流程控制能力的新版 Agent 的概要设计。
-
-## 1. 重构核心目标
-
-- **解决上下文遗忘问题**:确保 Agent 在多轮对话中,尤其是在连续调用 SQL 相关工具时,能够理解并利用之前的对话历史(如上文提到的实体“南城服务区”)。
-- **增强流程控制能力**:对 `generate_sql -> valid_sql -> run_sql` 这一固定流程进行强力引导,防止 LLM“忘记”执行下一步或执行错误,提高 Agent 的可靠性和可预测性。
-- **提升代码健壮性与可维护性**:通过模块化和清晰的职责划分,使代码更易于理解、调试和扩展。
-
-## 2. 最终 `StateGraph` 架构
-
-新架构的核心是一个包含 5 个节点的 `StateGraph`,它取代了原有的 `create_react_agent` 黑盒。
-
-![StateGraph Flow](https://mermaid.ink/img/pako:eNqNVc1qwzAQ_Zde5ZCHLHiVIqUEKjRAG3pQDxuCjM1OsbSwlGTqQIL__SrZdpqkh9zuzs5-d3eDCbygCmoy7dFm9JOv_Qz9Icvob7fso-996es_yz_1b-H4fTTX9e5rWz4etrutRgzHlDxJg2b3OYjSLnoa8HgrdfuRjQZH9r6g_FXd75LpwzT_vNX_8Cufhsnvbnl4-8Xu_sKnmHa2sCX_o7Ud9PsKOluN9J_a_ZOhc179yuVmyvqd7dv_Lltf9tFbifp4eH_XeIkcftEElv_V9N7webczuFf9jOqkehZQPgeZEtbyvMBtyZNO0M2PXqP6y_NfP9V5iZF6SOpPFOlZ0FuKUOtnOcv2jiGeth-PqCKvaNHdzQGfEPSeNRe3Nu4iqtuSPYthf0vnddOZhYzunvZ0uI9k-drffxtMfuLPTNd6u3eCXdOE409UttVPisR5WOY9ZgvtmvFvUYfzdetaulpePqPTPvO9nxNI9yd7VdqNxrzfN8OU-8O4Dqn-kOi9rv9C4EfHi_VfKOG-Y9-6tN7TvFZTy3-q3-m979c_fX_XO995f5EuPnfgt8l4U3I?type=png)
-
-```mermaid
-graph TD
-    A[START] --> B(agent_node);
-    B --> C{有工具调用?};
-    C -- 是 --> D(prepare_tool_input_node);
-    C -- 否 --> G(format_final_response_node);
-    D --> E(tool_node);
-    E --> F(update_state_after_tool_node);
-    F --> B;
-    G --> H[END];
-```
-
-### 2.1. 节点职责
-
-- **`agent_node` (决策者)**
-  - **输入**: 完整的 `state`,包含 `messages` 历史和 `suggested_next_step`。
-  - **职责**:
-    1.  读取完整的对话历史。
-    2.  读取 `state.suggested_next_step` 作为强烈的行动建议 (例如: `valid_sql`, `run_sql`, `analyze_error`)。
-    3.  通过提示工程,将建议和历史结合,让 LLM 做出决策。
-    4.  **输出**: 一个“草稿版”的 `tool_calls`,或决定直接回答的 `AIMessage`。
-
-- **`prepare_tool_input_node` (信息组装者)** - **(新增节点)**
-  - **位置**: `agent_node` 之后, `tool_node` 之前。
-  - **职责**:
-    1.  检查 `agent_node` 输出的 `tool_calls`。
-    2.  如果发现需要上下文的工具(如 `generate_sql`),则从 `state.messages` 中提取完整的对话历史。
-    3.  将提取的历史作为 `history_messages` 参数,**注入**到 `tool_calls` 的 `args` 中。
-  - **输出**: 一个“精装版”的、包含了完整上下文信息的 `tool_calls`。
-
-- **`tool_node` (执行者)**
-  - **职责**: 接收“精装版”的 `tool_calls`,并忠实地调用 `sql_tools.py` 中的工具函数。
-
-- **`update_state_after_tool_node` (流程建议与错误处理器)** - **(新增节点)**
-  - **位置**: `tool_node` 之后。
-  - **职责**:
-    1.  检查刚刚执行的工具名称及其返回结果(成功/失败)。
-    2.  根据预设的逻辑,智能地更新 `state.suggested_next_step` 字段,以精确引导下一步:
-        - **`generate_sql` 成功**: `suggested_next_step` -> `"valid_sql"`
-        - **`generate_sql` 失败**: `suggested_next_step` -> `"answer_with_common_sense"` (引导LLM基于常识回答或向用户解释)
-        - **`valid_sql` 成功**: `suggested_next_step` -> `"run_sql"`
-        - **`valid_sql` 失败**: `suggested_next_step` -> `"analyze_validation_error"` (引导LLM分析错误原因)
-        - **`run_sql` 执行后**: `suggested_next_step` -> `"summarize_final_answer"` (引导LLM基于数据总结)
-  - **输出**: 更新后的 `state`。
-
-- **`format_final_response_node` (最终输出格式化器)** - **(新增节点)**
-  - **位置**: 在 `agent_node` 决定直接回答后,图结束前。
-  - **职责 (v1 - 占位)**:
-    1.  **当前阶段**: 仅作为流程占位符,证明流程已正确进入此节点。
-    2.  在日志中打印一条明确的信息,如 `"[Node] format_final_response - 准备格式化最终输出..."`。
-  - **职责 (未来)**:
-    1.  从 `state` 中提取 LLM 的最终文字总结和最近一次 `run_sql` 的数据(如果存在)。
-    2.  将数据格式化为 Markdown 表格。
-    3.  将文字总结和数据表格合并成一个对用户友好的、结构化的最终答案。
-  - **输出**: 更新 `state` 中最后一条 `AIMessage` 的内容。
-
-## 3. `AgentState` 状态设计
-
-`state.py` 文件将定义 `StateGraph` 中流转的数据结构。
-
-```python
-from typing import TypedDict, Annotated, Optional, List
-from langchain_core.messages import BaseMessage
-from langgraph.graph.message import add_messages
-
-class AgentState(TypedDict):
-    messages: Annotated[List[BaseMessage], add_messages]
-    user_id: str
-    thread_id: str
-    # 新增字段,用于引导 LLM 的下一步行动
-    suggested_next_step: Optional[str]
-```
-
-- **`messages`**: 核心字段,存储完整的、包含 `HumanMessage`, `AIMessage`, `ToolMessage` 的对话历史。
-- **`suggested_next_step`**: 流程控制的关键。它由 `update_state_after_tool_node` 写入,由 `agent_node` 读取,为 LLM 提供强力的流程引导。
-
-## 4. 工具签名与实现
-
-- **`sql_tools.py`**:
-  - `generate_sql(question: str, history_messages: List[BaseMessage]) -> str`:
-    - 修改其函数签名,明确要求传入 `history_messages`。
-    - 在其内部,将 `question` 和 `history_messages` 组合成更丰富的提示,再交给 Vanna 的 LLM 进行处理,从而解决上下文理解问题(组合方式可参考本节末尾的示意代码)。
-  - `valid_sql` 和 `run_sql` 保持简单的输入输出。
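-
-下面是 `generate_sql` 将问题与历史组合成增强提示的一个最小示意(仅为草图:`_call_vanna_generate_sql` 是占位函数,真实实现应调用项目中封装好的 Vanna 接口,提示词格式以 `sql_tools.py` 实际代码为准):
-
-```python
-from typing import Any, Dict, List
-
-from langchain_core.tools import tool
-
-
-def _call_vanna_generate_sql(prompt: str) -> str:
-    """占位函数:真实实现应调用 Vanna 的 SQL 生成接口。"""
-    return f"-- SQL for: {prompt[:50]}"
-
-
-@tool
-def generate_sql(question: str, history_messages: List[Dict[str, Any]]) -> str:
-    """结合对话历史生成 SQL(示意实现)。"""
-    history_text = "\n".join(
-        f"{m.get('type')}: {m.get('content')}" for m in history_messages
-    )
-    enriched = (
-        f"对话历史:\n{history_text}\n\n当前问题:{question}" if history_text else question
-    )
-    return _call_vanna_generate_sql(enriched)
-```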
-
-## 5. 日志与持久化
-
-- **日志**: 使用 Python 内置的 `logging` 模块,由 `config.py` 控制级别。在每个节点的入口和出口、关键的逻辑判断处打印详细日志,以便清晰地追踪 Agent 的思考和执行链路。
-- **持久化**: 完全复用并保留原有的 `AsyncRedisSaver` 机制。`CustomReactAgent` 在初始化时创建 `checkpointer`,并在编译 `StateGraph` 时传入,以实现自动的状态持久化。
-
-## 6. 优势总结
-
-1.  **双重上下文保障**:
-    - **数据上下文**: 通过 `prepare_tool_input_node` 确保 `generate_sql` 能获取完整的对话历史。
-    - **流程上下文**: 通过 `update_state_after_tool_node` 和 `suggested_next_step` 确保 Agent 遵循预设的执行流程。
-2.  **职责分离**: 每个节点职责单一(决策、准备数据、执行、更新状态),代码清晰,易于维护。
-3.  **高度可控与可预测**: 在给予 LLM 思考空间的同时,通过代码逻辑保证了核心流程的稳定性和可靠性。
-4.  **易于调试**: 详细的日志输出将使追踪和定位问题变得非常简单。 

+ 0 - 114
test/custom_react_agent/doc/result 输出结果.md

@@ -1,114 +0,0 @@
-1.成功生成SQL并执行查询
-POST  http://localhost:8084/api/v0/ask_agent 
-
-{
-    "question": "请按照收入给每个高速服务区进行排名?返回收入最多的前三名服务区?"
-}
-
-
-#正常生成SQL,并完成查询的返回结果
-
-{
-    "code": 200,
-    "data": {
-        "agent_version": "langgraph_v1",
-        "classification_info": {
-            "confidence": 0.9,
-            "method": "rule_based_strong_business",
-            "reason": "强业务特征 - 业务实体: ['核心业务实体:服务区', '支付业务:收入'], 查询意图: ['排名'], SQL: []"
-        },
-        "context_used": false,
-        "conversation_id": "conv_1751199617_5d37a647",
-        "conversation_message": "创建新对话",
-        "conversation_status": "new",
-        "execution_path": [
-            "start",
-            "classify",
-            "agent_sql_generation",
-            "agent_sql_execution",
-            "format_response"
-        ],
-        "records": {
-            "columns": [
-                "服务区名称",
-                "总收入"
-            ],
-            "is_limited": false,
-            "row_count": 3,
-            "rows": [
-                {
-                    "总收入": "7024226.1500",
-                    "服务区名称": "庐山服务区"
-                },
-                {
-                    "总收入": "6929288.3300",
-                    "服务区名称": "三清山服务区"
-                },
-                {
-                    "总收入": "6848435.6700",
-                    "服务区名称": "南城服务区"
-                }
-            ],
-            "total_row_count": 3
-        },
-        "response": "根据收入排名,前三名高速服务区依次为:庐山服务区(702.42万元)、三清山服务区(692.93万元)、南城服务区(684.84万元)。",
-        "routing_mode_source": "config",
-        "routing_mode_used": "hybrid",
-        "session_id": null,
-        "sql": "SELECT service_name AS 服务区名称, SUM(pay_sum) AS 总收入 \nFROM bss_business_day_data \nWHERE delete_ts IS NULL \nGROUP BY service_name \nORDER BY 总收入 DESC NULLS LAST \nLIMIT 3;",
-        "summary": "根据收入排名,前三名高速服务区依次为:庐山服务区(702.42万元)、三清山服务区(692.93万元)、南城服务区(684.84万元)。",
-        "timestamp": "2025-06-29T20:20:56.806141",
-        "type": "DATABASE",
-
-}
-
-前端UI应关注的参数:
-1."response": 它将代替原来的summary,会查询的结果进行总结。
-2."sql":执行查询SQL.
-3."data.records":查询返回的数据,包括表头(data.records.columns)和数据行(data.records.rows)
-
-
-2.未成功生成SQL
-POST  http://localhost:8084/api/v0/ask_agent 
-{
-    "question": "请问每个高速公路服务区的管理经理是谁?"
-}
-
-
-# 返回结果
-{
-    "code": 200,
-    "data": {
-        "agent_version": "langgraph_v1",
-        "classification_info": {
-            "confidence": 0.82,
-            "method": "rule_based_medium_business",
-            "reason": "中等业务特征 - 业务实体: ['核心业务实体:服务区', '核心业务实体:高速公路']"
-        },
-        "context_used": false,
-        "conversation_id": "conv_1751201276_e59f0a07",
-        "conversation_message": "创建新对话",
-        "conversation_status": "new",
-        "execution_path": [
-            "start",
-            "classify",
-            "agent_sql_generation",
-            "format_response"
-        ],
-        "response": "当前提供的上下文信息不足以生成查询服务区对应管理经理的SQL语句。原因如下:\n\n1. 在服务区管理公司表(bss_company)中虽然存在created_by/updated_by字段,但这些字段仅记录数据操作者(系统用户),而非实际的管理经理人员信息。\n\n2. 现有表结构中缺失以下关键实体:\n   - 员工/人员信息表(存储经理姓名等个人信息)\n   - 公司与人员的组织架构表(关联公司ID与员工ID)\n\n3. 当前表间关系仅能查询到服务区所属的管理公司名称(通过bss_service_area.company_id关联bss_company.id),但无法获取具体管理人员的姓名。\n\n需要补充以下信息才能继续:\n- 存储人员信息的表结构(特别是管理岗位人员)\n- 公司与人员的关联关系表结构 请尝试提问其它问题。",
-        "routing_mode_source": "config",
-        "routing_mode_used": "hybrid",
-        "session_id": null,
-        "timestamp": "2025-06-29T20:48:21.351324",
-        "type": "DATABASE",
-        "user_id": "guest"
-    },
-    "message": "操作成功",
-    "success": true
-}
-
-
-前端UI应关注的参数:
-1.没有返回"sql"和"data.records"。
-2."response":当没有返回"sql"和"data.records"的时候,response会返回未能生成SQL的原因,可以返回给客户端
-

+ 0 - 1246
test/custom_react_agent/doc/两个关键问题.md

@@ -1,1246 +0,0 @@
-让我仔细分析你的代码,回答这两个关键问题:
-
-## 1. CustomReactAgent类异步化分析
-
-### ✅ 已经异步化的方法:
-```python
-@classmethod
-async def create(cls):                    # ✅ 异步工厂方法
-async def _async_init(self):              # ✅ 异步初始化
-async def close(self):                    # ✅ 异步清理
-async def chat(self, message, user_id, thread_id):  # ✅ 异步聊天
-async def get_conversation_history(self, thread_id): # ✅ 异步获取历史
-async def get_user_recent_conversations(self, user_id, limit): # ✅ 异步获取对话列表
-```
-
-### ⚠️ **仍然是同步的关键方法(问题所在):**
-```python
-def _should_continue(self, state: AgentState) -> str:           # ❌ 同步
-def _agent_node(self, state: AgentState) -> Dict[str, Any]:     # ❌ 同步
-def _prepare_tool_input_node(self, state: AgentState):         # ❌ 同步  
-def _update_state_after_tool_node(self, state: AgentState):    # ❌ 同步
-def _format_final_response_node(self, state: AgentState):      # ❌ 同步
-```
-
-**问题分析**:你的StateGraph节点是同步的,但是内部调用了异步LLM:
-```python
-def _agent_node(self, state: AgentState) -> Dict[str, Any]:
-    # 这是同步函数,但调用异步LLM
-    response = self.llm_with_tools.invoke(messages_for_llm)  # 🔥 这里有问题
-```
-
-## 2. Redis客户端使用分析
-
-### 你的代码中混用了两种Redis客户端:
-
-**异步Redis客户端(在agent.py中):**
-```python
-from redis.asyncio import Redis
-redis_client = Redis.from_url(config.REDIS_URL, decode_responses=True)
-```
-
-**同步Redis客户端(在api.py和enhanced_redis_api.py中):**
-```python
-import redis  # 同步版本
-redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-```
-
-### 🎯 **关键问题:混用导致的冲突**
-
-## 完整的修复方案## 回答你的两个关键问题:
-
-### 1. CustomReactAgent类异步化分析
-
-**❌ 你的CustomReactAgent并没有完全异步化!**
-
-虽然主要方法是async的,但**StateGraph的节点方法都是同步的**,这正是问题的根源:
-
-```python
-# ❌ 这些是同步方法,但内部调用异步LLM
-def _agent_node(self, state: AgentState) -> Dict[str, Any]:
-    response = self.llm_with_tools.invoke(messages_for_llm)  # 🔥问题在这里
-
-def _prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-def _update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-def _format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-def _should_continue(self, state: AgentState) -> str:
-```
-
-**核心问题**:LangGraph的StateGraph可以支持异步节点,但你没有使用!
-
-### 2. Redis客户端使用分析
-
-**❌ 你混用了同步和异步Redis客户端!**
-
-**异步Redis客户端(在agent.py中):**
-```python
-from redis.asyncio import Redis  # ✅ 异步版本
-redis_client = Redis.from_url(config.REDIS_URL, decode_responses=True)
-```
-
-**同步Redis客户端(在api.py和enhanced_redis_api.py中):**
-```python
-import redis  # ❌ 同步版本
-redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-```
-
-## 🎯 **在你的场景中是否一定要用异步Redis?**
-
-**答案:强烈建议使用,原因如下:**
-
-1. **架构一致性**:既然Flask路由是异步的,Redis操作也应该异步
-2. **避免阻塞**:同步Redis操作会阻塞事件循环,影响并发性能
-3. **消除错误源**:混用是导致"Event loop is closed"的重要原因
-4. **对话机器人特性**:用户期望快速响应,异步Redis能提供更好的并发能力
-
-## 🔧 **最关键的修改点**
-
-我在上面的artifact中提供了完整的修复方案,关键修改包括:
-
-### **1. StateGraph节点全部改为异步:**
-```python
-# ✅ 修改后:异步节点
-async def _async_agent_node(self, state: AgentState) -> Dict[str, Any]:
-    response = await self.llm_with_tools.ainvoke(messages_for_llm)  # 使用ainvoke
-
-async def _async_prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-async def _async_update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-async def _async_format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-async def _async_should_continue(self, state: AgentState) -> str:
-```
-
-### **2. 统一Redis客户端:**
-```python
-# ✅ 统一使用异步Redis
-import redis.asyncio as redis
-self.redis_client = redis.from_url(config.REDIS_URL, decode_responses=True)
-await self.redis_client.ping()
-```
-
-### **3. 移除复杂的事件循环管理:**
-```python
-# ❌ 删除这些函数
-def run_async_safely():  # 删除
-def ensure_agent_ready_sync():  # 删除
-
-# ✅ 直接使用async/await
-@app.route("/api/chat", methods=["POST"])
-async def chat_endpoint():
-    agent_result = await _agent_instance.chat(...)  # 直接调用
-```
-
-## 📋 **修改建议总结**
-
-1. **立即修改**:将StateGraph所有节点改为async版本
-2. **统一Redis**:全部使用`redis.asyncio`
-3. **删除包装**:移除所有`run_async_safely`类的函数
-4. **Flask路由**:核心API路由改为async
-5. **测试验证**:修改后测试Event loop错误是否消失
-
-**按照我提供的修复方案修改后,你就能直接用`python api.py`启动,不会再有任何事件循环问题!**
-
-
-"""
-CustomReactAgent完全异步化修复方案
-解决StateGraph节点同步/异步混用问题,统一Redis客户端使用
-"""
-
-# ============= 修复后的 agent.py =============
-import asyncio  # 异步重试逻辑中的 asyncio.sleep 依赖此导入
-import logging
-import json
-import pandas as pd
-from typing import List, Optional, Dict, Any, Tuple
-from contextlib import AsyncExitStack
-
-from langchain_openai import ChatOpenAI
-from langchain_core.messages import HumanMessage, ToolMessage, BaseMessage, SystemMessage, AIMessage
-from langgraph.graph import StateGraph, END
-from langgraph.prebuilt import ToolNode
-import redis.asyncio as redis  # 🔥 统一使用异步Redis
-try:
-    from langgraph.checkpoint.redis import AsyncRedisSaver
-except ImportError:
-    AsyncRedisSaver = None
-
-# 从新模块导入配置、状态和工具
-try:
-    from . import config
-    from .state import AgentState
-    from .sql_tools import sql_tools
-except ImportError:
-    import config
-    from state import AgentState
-    from sql_tools import sql_tools
-
-logger = logging.getLogger(__name__)
-
-class CustomReactAgent:
-    """
-    完全异步化的 CustomReactAgent
-    所有节点方法都是异步的,统一使用异步Redis客户端
-    """
-    def __init__(self):
-        """私有构造函数,请使用 create() 类方法来创建实例。"""
-        self.llm = None
-        self.tools = None
-        self.agent_executor = None
-        self.checkpointer = None
-        self._exit_stack = None
-        self.redis_client = None  # 🔥 添加Redis客户端引用
-
-    @classmethod
-    async def create(cls):
-        """异步工厂方法,创建并初始化 CustomReactAgent 实例。"""
-        instance = cls()
-        await instance._async_init()
-        return instance
-
-    async def _async_init(self):
-        """异步初始化所有组件。"""
-        logger.info("🚀 开始初始化 CustomReactAgent...")
-
-        # 1. 初始化异步Redis客户端
-        self.redis_client = redis.from_url(config.REDIS_URL, decode_responses=True)
-        try:
-            await self.redis_client.ping()
-            logger.info(f"   ✅ Redis连接成功: {config.REDIS_URL}")
-        except Exception as e:
-            logger.error(f"   ❌ Redis连接失败: {e}")
-            raise
-
-        # 2. 初始化 LLM
-        self.llm = ChatOpenAI(
-            api_key=config.QWEN_API_KEY,
-            base_url=config.QWEN_BASE_URL,
-            model=config.QWEN_MODEL,
-            temperature=0.1,
-            timeout=config.NETWORK_TIMEOUT,
-            max_retries=config.MAX_RETRIES,
-            extra_body={
-                "enable_thinking": False,
-                "misc": {
-                    "ensure_ascii": False
-                }
-            }
-        )
-        logger.info(f"   LLM 已初始化,模型: {config.QWEN_MODEL}")
-
-        # 3. 绑定工具
-        self.tools = sql_tools
-        self.llm_with_tools = self.llm.bind_tools(self.tools)
-        logger.info(f"   已绑定 {len(self.tools)} 个工具。")
-
-        # 4. 初始化 Redis Checkpointer
-        if config.REDIS_ENABLED and AsyncRedisSaver is not None:
-            try:
-                self._exit_stack = AsyncExitStack()
-                checkpointer_manager = AsyncRedisSaver.from_conn_string(config.REDIS_URL)
-                self.checkpointer = await self._exit_stack.enter_async_context(checkpointer_manager)
-                await self.checkpointer.asetup()
-                logger.info(f"   AsyncRedisSaver 持久化已启用: {config.REDIS_URL}")
-            except Exception as e:
-                logger.error(f"   ❌ RedisSaver 初始化失败: {e}", exc_info=True)
-                if self._exit_stack:
-                    await self._exit_stack.aclose()
-                self.checkpointer = None
-        else:
-            logger.warning("   Redis 持久化功能已禁用。")
-
-        # 5. 构建 StateGraph
-        self.agent_executor = self._create_graph()
-        logger.info("   StateGraph 已构建并编译。")
-        logger.info("✅ CustomReactAgent 初始化完成。")
-
-    async def close(self):
-        """清理资源,关闭所有连接。"""
-        if self._exit_stack:
-            await self._exit_stack.aclose()
-            self._exit_stack = None
-            self.checkpointer = None
-            logger.info("✅ RedisSaver 资源已通过 AsyncExitStack 释放。")
-        
-        if self.redis_client:
-            await self.redis_client.aclose()
-            logger.info("✅ Redis客户端已关闭。")
-
-    def _create_graph(self):
-        """定义并编译最终的、正确的 StateGraph 结构。"""
-        builder = StateGraph(AgentState)
-
-        # 🔥 关键修改:所有节点都是异步的
-        builder.add_node("agent", self._async_agent_node)
-        builder.add_node("prepare_tool_input", self._async_prepare_tool_input_node)
-        builder.add_node("tools", ToolNode(self.tools))
-        builder.add_node("update_state_after_tool", self._async_update_state_after_tool_node)
-        builder.add_node("format_final_response", self._async_format_final_response_node)
-
-        # 建立正确的边连接
-        builder.set_entry_point("agent")
-        builder.add_conditional_edges(
-            "agent",
-            self._async_should_continue,  # 🔥 异步条件判断
-            {
-                "continue": "prepare_tool_input",
-                "end": "format_final_response"
-            }
-        )
-        builder.add_edge("prepare_tool_input", "tools")
-        builder.add_edge("tools", "update_state_after_tool")
-        builder.add_edge("update_state_after_tool", "agent")
-        builder.add_edge("format_final_response", END)
-
-        return builder.compile(checkpointer=self.checkpointer)
-
-    async def _async_should_continue(self, state: AgentState) -> str:
-        """🔥 异步版本:判断是继续调用工具还是结束。"""
-        last_message = state["messages"][-1]
-        if hasattr(last_message, "tool_calls") and last_message.tool_calls:
-            return "continue"
-        return "end"
-
-    async def _async_agent_node(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:Agent 节点,使用异步LLM调用。"""
-        logger.info(f"🧠 [Async Node] agent - Thread: {state['thread_id']}")
-        
-        messages_for_llm = list(state["messages"])
-        if state.get("suggested_next_step"):
-            instruction = f"提示:建议下一步使用工具 '{state['suggested_next_step']}'。"
-            messages_for_llm.append(SystemMessage(content=instruction))
-
-        # 🔥 关键修改:使用异步LLM调用
-        max_retries = config.MAX_RETRIES
-        for attempt in range(max_retries):
-            try:
-                # 使用异步调用
-                response = await self.llm_with_tools.ainvoke(messages_for_llm)
-                logger.info(f"   ✅ 异步LLM调用成功")
-                return {"messages": [response]}
-                
-            except Exception as e:
-                error_msg = str(e)
-                logger.warning(f"   ⚠️ 异步LLM调用失败 (尝试 {attempt + 1}/{max_retries}): {error_msg}")
-                
-                if any(keyword in error_msg for keyword in [
-                    "Connection error", "APIConnectionError", "ConnectError", 
-                    "timeout", "远程主机强迫关闭", "网络连接"
-                ]):
-                    if attempt < max_retries - 1:
-                        wait_time = config.RETRY_BASE_DELAY ** attempt
-                        logger.info(f"   🔄 网络错误,{wait_time}秒后重试...")
-                        await asyncio.sleep(wait_time)  # 🔥 使用async sleep
-                        continue
-                    else:
-                        logger.error(f"   ❌ 网络连接持续失败,返回降级回答")
-                        sql_data = await self._async_extract_latest_sql_data(state["messages"])
-                        if sql_data:
-                            fallback_content = "抱歉,由于网络连接问题,无法生成完整的文字总结。不过查询已成功执行,结果如下:\n\n" + sql_data
-                        else:
-                            fallback_content = "抱歉,由于网络连接问题,无法完成此次请求。请稍后重试或检查网络连接。"
-                            
-                        fallback_response = AIMessage(content=fallback_content)
-                        return {"messages": [fallback_response]}
-                else:
-                    logger.error(f"   ❌ LLM调用出现非网络错误: {error_msg}")
-                    raise e
-
-    async def _async_prepare_tool_input_node(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:信息组装节点。"""
-        logger.info(f"🛠️ [Async Node] prepare_tool_input - Thread: {state['thread_id']}")
-        
-        last_message = state["messages"][-1]
-        if not hasattr(last_message, "tool_calls") or not last_message.tool_calls:
-            return {"messages": [last_message]}
-
-        new_tool_calls = []
-        for tool_call in last_message.tool_calls:
-            if tool_call["name"] == "generate_sql":
-                logger.info("   检测到 generate_sql 调用,注入历史消息。")
-                modified_args = tool_call["args"].copy()
-                
-                clean_history = []
-                messages_except_current = state["messages"][:-1]
-                
-                for msg in messages_except_current:
-                    if isinstance(msg, HumanMessage):
-                        clean_history.append({
-                            "type": "human",
-                            "content": msg.content
-                        })
-                    elif isinstance(msg, AIMessage):
-                        if msg.content and "[Formatted Output]" in msg.content:
-                            clean_content = msg.content.replace("[Formatted Output]\n", "")
-                            clean_history.append({
-                                "type": "ai",
-                                "content": clean_content
-                            })
-                
-                modified_args["history_messages"] = clean_history
-                logger.info(f"   注入了 {len(clean_history)} 条过滤后的历史消息")
-                
-                new_tool_calls.append({
-                    "name": tool_call["name"],
-                    "args": modified_args,
-                    "id": tool_call["id"],
-                })
-            else:
-                new_tool_calls.append(tool_call)
-        
-        last_message.tool_calls = new_tool_calls
-        return {"messages": [last_message]}
-
-    async def _async_update_state_after_tool_node(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:在工具执行后,更新 suggested_next_step。"""
-        logger.info(f"📝 [Async Node] update_state_after_tool - Thread: {state['thread_id']}")
-        
-        last_tool_message = state['messages'][-1]
-        tool_name = last_tool_message.name
-        tool_output = last_tool_message.content
-        next_step = None
-
-        if tool_name == 'generate_sql':
-            if "失败" in tool_output or "无法生成" in tool_output:
-                next_step = 'answer_with_common_sense'
-            else:
-                next_step = 'valid_sql'
-        elif tool_name == 'valid_sql':
-            if "失败" in tool_output:
-                next_step = 'analyze_validation_error'
-            else:
-                next_step = 'run_sql'
-        elif tool_name == 'run_sql':
-            next_step = 'summarize_final_answer'
-            
-        logger.info(f"   Tool '{tool_name}' executed. Suggested next step: {next_step}")
-        return {"suggested_next_step": next_step}
-
-    async def _async_format_final_response_node(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:最终输出格式化节点。"""
-        logger.info(f"🎨 [Async Node] format_final_response - Thread: {state['thread_id']}")
-        
-        last_message = state['messages'][-1]
-        last_message.content = f"[Formatted Output]\n{last_message.content}"
-        
-        # 生成API格式的数据
-        api_data = await self._async_generate_api_data(state)
-
-        return {
-            "messages": [last_message],
-            "api_data": api_data
-        }
-
-    async def _async_generate_api_data(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:生成API格式的数据结构"""
-        logger.info("📊 异步生成API格式数据...")
-        
-        last_message = state['messages'][-1]
-        response_content = last_message.content
-        
-        if response_content.startswith("[Formatted Output]\n"):
-            response_content = response_content.replace("[Formatted Output]\n", "")
-        
-        api_data = {
-            "response": response_content
-        }
-        
-        sql_info = await self._async_extract_sql_and_data(state['messages'])
-        if sql_info['sql']:
-            api_data["sql"] = sql_info['sql']
-        if sql_info['records']:
-            api_data["records"] = sql_info['records']
-        
-        api_data["react_agent_meta"] = await self._async_collect_agent_metadata(state)
-        
-        logger.info(f"   API数据生成完成,包含字段: {list(api_data.keys())}")
-        return api_data
-
-    async def _async_extract_sql_and_data(self, messages: List[BaseMessage]) -> Dict[str, Any]:
-        """🔥 异步版本:从消息历史中提取SQL和数据记录"""
-        result = {"sql": None, "records": None}
-        
-        last_human_index = -1
-        for i in range(len(messages) - 1, -1, -1):
-            if isinstance(messages[i], HumanMessage):
-                last_human_index = i
-                break
-        
-        if last_human_index == -1:
-            return result
-        
-        current_conversation = messages[last_human_index:]
-        sql_query = None
-        sql_data = None
-        
-        for msg in current_conversation:
-            if isinstance(msg, ToolMessage):
-                if msg.name == 'generate_sql':
-                    content = msg.content
-                    if content and not any(keyword in content for keyword in ["失败", "无法生成", "Database query failed"]):
-                        sql_query = content.strip()
-                elif msg.name == 'run_sql':
-                    try:
-                        import json
-                        parsed_data = json.loads(msg.content)
-                        if isinstance(parsed_data, list) and len(parsed_data) > 0:
-                            columns = list(parsed_data[0].keys()) if parsed_data else []
-                            sql_data = {
-                                "columns": columns,
-                                "rows": parsed_data,
-                                "total_row_count": len(parsed_data),
-                                "is_limited": False
-                            }
-                    except (json.JSONDecodeError, Exception) as e:
-                        logger.warning(f"   解析SQL结果失败: {e}")
-        
-        if sql_query:
-            result["sql"] = sql_query
-        if sql_data:
-            result["records"] = sql_data
-            
-        return result
-
-    async def _async_collect_agent_metadata(self, state: AgentState) -> Dict[str, Any]:
-        """🔥 异步版本:收集Agent元数据"""
-        messages = state['messages']
-        
-        tools_used = []
-        sql_execution_count = 0
-        context_injected = False
-        conversation_rounds = sum(1 for msg in messages if isinstance(msg, HumanMessage))
-        
-        for msg in messages:
-            if isinstance(msg, ToolMessage):
-                if msg.name not in tools_used:
-                    tools_used.append(msg.name)
-                if msg.name == 'run_sql':
-                    sql_execution_count += 1
-            elif isinstance(msg, AIMessage) and hasattr(msg, 'tool_calls') and msg.tool_calls:
-                for tool_call in msg.tool_calls:
-                    tool_name = tool_call.get('name')
-                    if tool_name and tool_name not in tools_used:
-                        tools_used.append(tool_name)
-                    
-                    if (tool_name == 'generate_sql' and 
-                        tool_call.get('args', {}).get('history_messages')):
-                        context_injected = True
-        
-        execution_path = ["agent"]
-        if tools_used:
-            execution_path.extend(["prepare_tool_input", "tools"])
-        execution_path.append("format_final_response")
-        
-        return {
-            "thread_id": state['thread_id'],
-            "conversation_rounds": conversation_rounds,
-            "tools_used": tools_used,
-            "execution_path": execution_path,
-            "total_messages": len(messages),
-            "sql_execution_count": sql_execution_count,
-            "context_injected": context_injected,
-            "agent_version": "custom_react_v1_async"
-        }
-
-    async def _async_extract_latest_sql_data(self, messages: List[BaseMessage]) -> Optional[str]:
-        """🔥 异步版本:提取最新的SQL执行结果"""
-        logger.info("🔍 异步提取最新的SQL执行结果...")
-        
-        last_human_index = -1
-        for i in range(len(messages) - 1, -1, -1):
-            if isinstance(messages[i], HumanMessage):
-                last_human_index = i
-                break
-        
-        if last_human_index == -1:
-            logger.info("   未找到用户消息,跳过SQL数据提取")
-            return None
-        
-        current_conversation = messages[last_human_index:]
-        logger.info(f"   当前对话轮次包含 {len(current_conversation)} 条消息")
-        
-        for msg in reversed(current_conversation):
-            if isinstance(msg, ToolMessage) and msg.name == 'run_sql':
-                logger.info(f"   找到当前对话轮次的run_sql结果: {msg.content[:100]}...")
-                
-                try:
-                    parsed_data = json.loads(msg.content)
-                    formatted_content = json.dumps(parsed_data, ensure_ascii=False, separators=(',', ':'))
-                    logger.info(f"   已转换Unicode转义序列为中文字符")
-                    return formatted_content
-                except json.JSONDecodeError:
-                    logger.warning(f"   SQL结果不是有效JSON格式,返回原始内容")
-                    return msg.content
-        
-        logger.info("   当前对话轮次中未找到run_sql执行结果")
-        return None
-
-    async def chat(self, message: str, user_id: str, thread_id: Optional[str] = None) -> Dict[str, Any]:
-        """🔥 完全异步的聊天处理方法"""
-        if not thread_id:
-            now = pd.Timestamp.now()
-            milliseconds = int(now.microsecond / 1000)
-            thread_id = f"{user_id}:{now.strftime('%Y%m%d%H%M%S')}{milliseconds:03d}"
-            logger.info(f"🆕 新建会话,Thread ID: {thread_id}")
-        
-        config = {
-            "configurable": {
-                "thread_id": thread_id,
-            }
-        }
-        
-        inputs = {
-            "messages": [HumanMessage(content=message)],
-            "user_id": user_id,
-            "thread_id": thread_id,
-            "suggested_next_step": None,
-        }
-
-        try:
-            # 🔥 使用异步调用
-            final_state = await self.agent_executor.ainvoke(inputs, config)
-            answer = final_state["messages"][-1].content
-            
-            sql_data = await self._async_extract_latest_sql_data(final_state["messages"])
-            
-            logger.info(f"✅ 异步处理完成 - Final Answer: '{answer}'")
-            
-            result = {
-                "success": True, 
-                "answer": answer, 
-                "thread_id": thread_id
-            }
-            
-            if sql_data:
-                result["sql_data"] = sql_data
-                logger.info("   📊 已包含SQL原始数据")
-            
-            if "api_data" in final_state:
-                result["api_data"] = final_state["api_data"]
-                logger.info("   🔌 已包含API格式数据")
-            
-            return result
-            
-        except Exception as e:
-            logger.error(f"❌ 异步处理过程中发生严重错误 - Thread: {thread_id}: {e}", exc_info=True)
-            return {"success": False, "error": str(e), "thread_id": thread_id}
-    
-    async def get_conversation_history(self, thread_id: str) -> List[Dict[str, Any]]:
-        """🔥 完全异步的对话历史获取"""
-        if not self.checkpointer:
-            return []
-        
-        config = {"configurable": {"thread_id": thread_id}}
-        try:
-            conversation_state = await self.checkpointer.aget(config)
-        except RuntimeError as e:
-            if "Event loop is closed" in str(e):
-                logger.warning(f"⚠️ Event loop已关闭,返回空结果: {thread_id}")
-                return []
-            else:
-                raise
-        
-        if not conversation_state:
-            return []
-            
-        history = []
-        messages = conversation_state.get('channel_values', {}).get('messages', [])
-        for msg in messages:
-            if isinstance(msg, HumanMessage):
-                role = "human"
-            elif isinstance(msg, ToolMessage):
-                role = "tool"
-            else:
-                role = "ai"
-            
-            history.append({
-                "type": role,
-                "content": msg.content,
-                "tool_calls": getattr(msg, 'tool_calls', None)
-            })
-        return history 
-
-    async def get_user_recent_conversations(self, user_id: str, limit: int = 10) -> List[Dict[str, Any]]:
-        """🔥 完全异步的用户对话列表获取"""
-        if not self.checkpointer:
-            return []
-        
-        try:
-            # 🔥 使用统一的异步Redis客户端
-            pattern = f"checkpoint:{user_id}:*"
-            logger.info(f"🔍 异步扫描模式: {pattern}")
-            
-            user_threads = {}
-            cursor = 0
-            
-            while True:
-                cursor, keys = await self.redis_client.scan(
-                    cursor=cursor,
-                    match=pattern,
-                    count=1000
-                )
-                
-                for key in keys:
-                    try:
-                        key_str = key.decode() if isinstance(key, bytes) else key
-                        parts = key_str.split(':')
-                        
-                        if len(parts) >= 4:
-                            thread_id = f"{parts[1]}:{parts[2]}"
-                            timestamp = parts[2]
-                            
-                            if thread_id not in user_threads:
-                                user_threads[thread_id] = {
-                                    "thread_id": thread_id,
-                                    "timestamp": timestamp,
-                                    "latest_key": key_str
-                                }
-                            else:
-                                if len(parts) > 4 and parts[4] > user_threads[thread_id]["latest_key"].split(':')[4]:
-                                    user_threads[thread_id]["latest_key"] = key_str
-                                    
-                    except Exception as e:
-                        logger.warning(f"解析key {key} 失败: {e}")
-                        continue
-                
-                if cursor == 0:
-                    break
-            
-            # 按时间戳排序
-            sorted_threads = sorted(
-                user_threads.values(),
-                key=lambda x: x["timestamp"],
-                reverse=True
-            )[:limit]
-            
-            # 获取每个thread的详细信息
-            conversations = []
-            for thread_info in sorted_threads:
-                try:
-                    thread_id = thread_info["thread_id"]
-                    thread_config = {"configurable": {"thread_id": thread_id}}
-                    
-                    try:
-                        state = await self.checkpointer.aget(thread_config)
-                    except RuntimeError as e:
-                        if "Event loop is closed" in str(e):
-                            logger.warning(f"⚠️ Event loop已关闭,跳过thread: {thread_id}")
-                            continue
-                        else:
-                            raise
-                    
-                    if state and state.get('channel_values', {}).get('messages'):
-                        messages = state['channel_values']['messages']
-                        preview = self._generate_conversation_preview(messages)
-                        
-                        conversations.append({
-                            "thread_id": thread_id,
-                            "user_id": user_id,
-                            "timestamp": thread_info["timestamp"],
-                            "message_count": len(messages),
-                            "last_message": messages[-1].content if messages else None,
-                            "last_updated": state.get('created_at'),
-                            "conversation_preview": preview,
-                            "formatted_time": self._format_timestamp(thread_info["timestamp"])
-                        })
-                        
-                except Exception as e:
-                    logger.error(f"获取thread {thread_info['thread_id']} 详情失败: {e}")
-                    continue
-            
-            logger.info(f"✅ 异步找到用户 {user_id} 的 {len(conversations)} 个对话")
-            return conversations
-            
-        except Exception as e:
-            logger.error(f"❌ 异步获取用户 {user_id} 对话列表失败: {e}")
-            return []
-
-    def _generate_conversation_preview(self, messages: List[BaseMessage]) -> str:
-        """生成对话预览(保持同步,因为是纯计算)"""
-        if not messages:
-            return "空对话"
-        
-        for msg in messages:
-            if isinstance(msg, HumanMessage):
-                content = str(msg.content)
-                return content[:50] + "..." if len(content) > 50 else content
-        
-        return "系统消息"
-
-    def _format_timestamp(self, timestamp: str) -> str:
-        """格式化时间戳为可读格式(保持同步,因为是纯计算)"""
-        try:
-            if len(timestamp) >= 14:
-                year = timestamp[:4]
-                month = timestamp[4:6]
-                day = timestamp[6:8]
-                hour = timestamp[8:10]
-                minute = timestamp[10:12]
-                second = timestamp[12:14]
-                return f"{year}-{month}-{day} {hour}:{minute}:{second}"
-        except Exception:
-            pass
-        return timestamp
-
-
-# ============= 修复后的 api.py 关键部分 =============
-
-"""
-修复后的 api.py - 统一使用异步Redis客户端,移除复杂的事件循环管理
-"""
-
-import asyncio
-import logging
-import os
-from datetime import datetime
-from typing import Optional, Dict, Any
-
-from flask import Flask, request, jsonify
-import redis.asyncio as redis  # 🔥 统一使用异步Redis
-
-try:
-    from .agent import CustomReactAgent
-except ImportError:
-    from agent import CustomReactAgent
-
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger(__name__)
-
-# 全局Agent实例
-_agent_instance: Optional[CustomReactAgent] = None
-_redis_client: Optional[redis.Redis] = None
-
-def validate_request_data(data: Dict[str, Any]) -> Dict[str, Any]:
-    """验证请求数据(保持不变)"""
-    errors = []
-    
-    question = data.get('question', '')
-    if not question or not question.strip():
-        errors.append('问题不能为空')
-    elif len(question) > 2000:
-        errors.append('问题长度不能超过2000字符')
-    
-    user_id = data.get('user_id', 'guest')
-    if user_id and len(user_id) > 50:
-        errors.append('用户ID长度不能超过50字符')
-    
-    if errors:
-        raise ValueError('; '.join(errors))
-    
-    return {
-        'question': question.strip(),
-        'user_id': user_id or 'guest',
-        'thread_id': data.get('thread_id')
-    }
-
-async def initialize_agent():
-    """🔥 异步初始化Agent"""
-    global _agent_instance, _redis_client
-    
-    if _agent_instance is None:
-        logger.info("🚀 正在异步初始化 Custom React Agent...")
-        try:
-            os.environ['REDIS_URL'] = 'redis://localhost:6379'
-            
-            # 初始化共享的Redis客户端
-            _redis_client = redis.from_url('redis://localhost:6379', decode_responses=True)
-            await _redis_client.ping()
-            
-            _agent_instance = await CustomReactAgent.create()
-            logger.info("✅ Agent 异步初始化完成")
-        except Exception as e:
-            logger.error(f"❌ Agent 异步初始化失败: {e}")
-            raise
-
-async def ensure_agent_ready():
-    """🔥 异步确保Agent实例可用"""
-    global _agent_instance
-    
-    if _agent_instance is None:
-        await initialize_agent()
-    
-    try:
-        test_result = await _agent_instance.get_user_recent_conversations("__test__", 1)
-        return True
-    except Exception as e:
-        logger.warning(f"⚠️ Agent实例不可用: {e}")
-        _agent_instance = None
-        await initialize_agent()
-        return True
-
-async def cleanup_agent():
-    """🔥 异步清理Agent资源"""
-    global _agent_instance, _redis_client
-    
-    if _agent_instance:
-        await _agent_instance.close()
-        logger.info("✅ Agent 资源已异步清理")
-        _agent_instance = None
-    
-    if _redis_client:
-        await _redis_client.aclose()
-        logger.info("✅ Redis客户端已异步关闭")
-        _redis_client = None
-
-# 创建Flask应用
-app = Flask(__name__)
-
-# 🔥 移除所有同步包装函数:run_async_safely, ensure_agent_ready_sync
-
-@app.route("/")
-def root():
-    """健康检查端点(保持同步)"""
-    return jsonify({"message": "Custom React Agent API 服务正在运行"})
-
-@app.route('/health', methods=['GET'])
-def health_check():
-    """健康检查端点(保持同步)"""
-    try:
-        health_status = {
-            "status": "healthy",
-            "agent_initialized": _agent_instance is not None,
-            "timestamp": datetime.now().isoformat()
-        }
-        return jsonify(health_status), 200
-    except Exception as e:
-        logger.error(f"健康检查失败: {e}")
-        return jsonify({"status": "unhealthy", "error": str(e)}), 500
-
-@app.route("/api/chat", methods=["POST"])
-async def chat_endpoint():
-    """🔥 异步智能问答接口"""
-    global _agent_instance
-    
-    # 确保Agent已初始化
-    if not await ensure_agent_ready():
-        return jsonify({
-            "code": 503,
-            "message": "服务未就绪",
-            "success": False,
-            "error": "Agent 初始化失败"
-        }), 503
-    
-    try:
-        data = request.get_json()
-        if not data:
-            return jsonify({
-                "code": 400,
-                "message": "请求参数错误",
-                "success": False,
-                "error": "请求体不能为空"
-            }), 400
-        
-        validated_data = validate_request_data(data)
-        
-        logger.info(f"📨 收到请求 - User: {validated_data['user_id']}, Question: {validated_data['question'][:50]}...")
-        
-        # 🔥 直接调用异步方法,不需要事件循环包装
-        agent_result = await _agent_instance.chat(
-            message=validated_data['question'],
-            user_id=validated_data['user_id'],
-            thread_id=validated_data['thread_id']
-        )
-        
-        if not agent_result.get("success", False):
-            error_msg = agent_result.get("error", "Agent处理失败")
-            logger.error(f"❌ Agent处理失败: {error_msg}")
-            
-            return jsonify({
-                "code": 500,
-                "message": "处理失败",
-                "success": False,
-                "error": error_msg,
-                "data": {
-                    "react_agent_meta": {
-                        "thread_id": agent_result.get("thread_id"),
-                        "agent_version": "custom_react_v1_async",
-                        "execution_path": ["error"]
-                    },
-                    "timestamp": datetime.now().isoformat()
-                }
-            }), 500
-        
-        api_data = agent_result.get("api_data", {})
-        response_data = {
-            **api_data,
-            "timestamp": datetime.now().isoformat()
-        }
-        
-        logger.info(f"✅ 异步请求处理成功 - Thread: {api_data.get('react_agent_meta', {}).get('thread_id')}")
-        
-        return jsonify({
-            "code": 200,
-            "message": "操作成功",
-            "success": True,
-            "data": response_data
-        })
-        
-    except ValueError as e:
-        logger.warning(f"⚠️ 参数验证失败: {e}")
-        return jsonify({
-            "code": 400,
-            "message": "请求参数错误",
-            "success": False,
-            "error": str(e)
-        }), 400
-        
-    except Exception as e:
-        logger.error(f"❌ 未预期的错误: {e}", exc_info=True)
-        return jsonify({
-            "code": 500,
-            "message": "服务器内部错误", 
-            "success": False,
-            "error": "系统异常,请稍后重试"
-        }), 500
-
-@app.route('/api/v0/react/users/<user_id>/conversations', methods=['GET'])
-async def get_user_conversations(user_id: str):
-    """🔥 异步获取用户的聊天记录列表"""
-    global _agent_instance
-    
-    try:
-        limit = request.args.get('limit', 10, type=int)
-        limit = max(1, min(limit, 50))
-        
-        logger.info(f"📋 异步获取用户 {user_id} 的对话列表,限制 {limit} 条")
-        
-        if not await ensure_agent_ready():
-            return jsonify({
-                "success": False,
-                "error": "Agent 未就绪",
-                "timestamp": datetime.now().isoformat()
-            }), 503
-        
-        # 🔥 直接调用异步方法
-        conversations = await _agent_instance.get_user_recent_conversations(user_id, limit)
-        
-        return jsonify({
-            "success": True,
-            "data": {
-                "user_id": user_id,
-                "conversations": conversations,
-                "total_count": len(conversations),
-                "limit": limit
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        logger.error(f"❌ 异步获取用户 {user_id} 对话列表失败: {e}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-@app.route('/api/v0/react/users/<user_id>/conversations/<thread_id>', methods=['GET'])
-async def get_user_conversation_detail(user_id: str, thread_id: str):
-    """🔥 异步获取特定对话的详细历史"""
-    global _agent_instance
-    
-    try:
-        if not thread_id.startswith(f"{user_id}:"):
-            return jsonify({
-                "success": False,
-                "error": f"Thread ID {thread_id} 不属于用户 {user_id}",
-                "timestamp": datetime.now().isoformat()
-            }), 400
-        
-        logger.info(f"📖 异步获取用户 {user_id} 的对话 {thread_id} 详情")
-        
-        if not await ensure_agent_ready():
-            return jsonify({
-                "success": False,
-                "error": "Agent 未就绪",
-                "timestamp": datetime.now().isoformat()
-            }), 503
-        
-        # 🔥 直接调用异步方法
-        history = await _agent_instance.get_conversation_history(thread_id)
-        logger.info(f"✅ 异步成功获取对话历史,消息数量: {len(history)}")
-        
-        if not history:
-            return jsonify({
-                "success": False,
-                "error": f"未找到对话 {thread_id}",
-                "timestamp": datetime.now().isoformat()
-            }), 404
-        
-        return jsonify({
-            "success": True,
-            "data": {
-                "user_id": user_id,
-                "thread_id": thread_id,
-                "message_count": len(history),
-                "messages": history
-            },
-            "timestamp": datetime.now().isoformat()
-        }), 200
-        
-    except Exception as e:
-        import traceback
-        logger.error(f"❌ 异步获取对话 {thread_id} 详情失败: {e}")
-        logger.error(f"❌ 详细错误信息: {traceback.format_exc()}")
-        return jsonify({
-            "success": False,
-            "error": str(e),
-            "timestamp": datetime.now().isoformat()
-        }), 500
-
-# 🔥 异步Redis API(如果还需要直接Redis访问)
-async def get_user_conversations_async(user_id: str, limit: int = 10):
-    """🔥 完全异步的Redis查询函数"""
-    global _redis_client
-    
-    try:
-        if not _redis_client:
-            _redis_client = redis.from_url('redis://localhost:6379', decode_responses=True)
-            await _redis_client.ping()
-        
-        pattern = f"checkpoint:{user_id}:*"
-        logger.info(f"🔍 异步扫描模式: {pattern}")
-        
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = await _redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        logger.info(f"📋 异步找到 {len(keys)} 个keys")
-        
-        # 解析和处理逻辑(与原来相同,但使用异步Redis操作)
-        thread_data = {}
-        for key in keys:
-            try:
-                parts = key.split(':')
-                if len(parts) >= 4:
-                    thread_id = f"{parts[1]}:{parts[2]}"
-                    timestamp = parts[2]
-                    
-                    if thread_id not in thread_data:
-                        thread_data[thread_id] = {
-                            "thread_id": thread_id,
-                            "timestamp": timestamp,
-                            "keys": []
-                        }
-                    thread_data[thread_id]["keys"].append(key)
-            except Exception as e:
-                logger.warning(f"解析key失败 {key}: {e}")
-                continue
-        
-        sorted_threads = sorted(
-            thread_data.values(),
-            key=lambda x: x["timestamp"],
-            reverse=True
-        )[:limit]
-        
-        conversations = []
-        for thread_info in sorted_threads:
-            try:
-                thread_id = thread_info["thread_id"]
-                latest_key = max(thread_info["keys"])
-                
-                # 🔥 使用异步Redis获取
-                key_type = await _redis_client.type(latest_key)
-                
-                data = None
-                if key_type == 'string':
-                    data = await _redis_client.get(latest_key)
-                elif key_type == 'ReJSON-RL':
-                    try:
-                        data = await _redis_client.execute_command('JSON.GET', latest_key)
-                    except Exception as json_error:
-                        logger.error(f"❌ 异步JSON.GET 失败: {json_error}")
-                        continue
-                
-                if data:
-                    try:
-                        import json
-                        checkpoint_data = json.loads(data)
-                        
-                        messages = []
-                        if 'checkpoint' in checkpoint_data:
-                            checkpoint = checkpoint_data['checkpoint']
-                            if isinstance(checkpoint, dict) and 'channel_values' in checkpoint:
-                                channel_values = checkpoint['channel_values']
-                                if isinstance(channel_values, dict) and 'messages' in channel_values:
-                                    messages = channel_values['messages']
-                        
-                        preview = "空对话"
-                        if messages:
-                            for msg in messages:
-                                if isinstance(msg, dict):
-                                    if (msg.get('lc') == 1 and 
-                                        msg.get('type') == 'constructor' and 
-                                        'id' in msg and 
-                                        isinstance(msg['id'], list) and 
-                                        len(msg['id']) >= 4 and
-                                        msg['id'][3] == 'HumanMessage' and
-                                        'kwargs' in msg):
-                                        
-                                        kwargs = msg['kwargs']
-                                        if kwargs.get('type') == 'human' and 'content' in kwargs:
-                                            content = str(kwargs['content'])
-                                            preview = content[:50] + "..." if len(content) > 50 else content
-                                            break
-                        
-                        conversations.append({
-                            "thread_id": thread_id,
-                            "user_id": user_id,
-                            "timestamp": thread_info["timestamp"],
-                            "message_count": len(messages),
-                            "conversation_preview": preview
-                        })
-                        
-                    except json.JSONDecodeError:
-                        logger.error(f"❌ 异步JSON解析失败")
-                        continue
-                    
-            except Exception as e:
-                logger.error(f"异步处理thread {thread_info['thread_id']} 失败: {e}")
-                continue
-        
-        logger.info(f"✅ 异步返回 {len(conversations)} 个对话")
-        return conversations
-        
-    except Exception as e:
-        logger.error(f"❌ 异步Redis查询失败: {e}")
-        return []
-
-# 🔥 异步启动和清理
-async def startup():
-    """应用启动时的异步初始化"""
-    logger.info("🚀 启动异步Flask应用...")
-    try:
-        await initialize_agent()
-        logger.info("✅ Agent 预初始化完成")
-    except Exception as e:
-        logger.error(f"❌ 启动时Agent初始化失败: {e}")
-
-async def shutdown():
-    """应用关闭时的异步清理"""
-    logger.info("🔄 关闭异步Flask应用...")
-    try:
-        await cleanup_agent()
-        logger.info("✅ 资源清理完成")
-    except Exception as e:
-        logger.error(f"❌ 关闭时清理失败: {e}")
-
-if __name__ == "__main__":
-    # 🔥 简化的启动方式 - Flask 3.x 原生支持异步
-    logger.info("🚀 使用Flask内置异步支持启动...")
-    
-    # 信号处理
-    import signal
-    
-    def signal_handler(signum, frame):
-        logger.info("🛑 收到关闭信号,开始清理...")
-        # 在信号处理中,我们只能打印消息,实际清理在程序正常退出时进行
-        print("正在关闭服务...")
-        exit(0)
-    
-    signal.signal(signal.SIGINT, signal_handler)
-    signal.signal(signal.SIGTERM, signal_handler)
-    
-    # 启动Flask应用
-    app.run(host="0.0.0.0", port=8000, debug=False)

+ 0 - 31
test/custom_react_agent/doc/修改默认用户.md

@@ -1,31 +0,0 @@
-1.在执行python shell的时候,默认用户是default-user,启动日志如下:
-
-(.venv) custom_react_agent> python shell.py
-20:24:02 test.custom_react_agent.agent INFO   🚀 开始初始化 CustomReactAgent...
-20:24:02 test.custom_react_agent.agent INFO      ✅ Redis连接成功: redis://localhost:6379
-20:24:05 test.custom_react_agent.agent INFO      LLM 已初始化,模型: qwen3-235b-a22b
-20:24:05 test.custom_react_agent.agent INFO      已绑定 3 个工具。
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 langgraph.checkpoint.redis.aio INFO   Redis client is a standalone client
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 redisvl.index.index INFO   Index already exists, not overwriting.
-20:24:05 langgraph.checkpoint.redis.aio INFO   Redis cluster_mode explicitly set to False, skipping detection.
-20:24:05 test.custom_react_agent.agent INFO      AsyncRedisSaver 持久化已启用: redis://localhost:6379
-20:24:05 test.custom_react_agent.agent INFO      StateGraph 已构建并编译。
-20:24:05 test.custom_react_agent.agent INFO   ✅ CustomReactAgent 初始化完成。
-
-🚀 Custom React Agent Shell (StateGraph Version)
-==================================================
-请输入您的用户ID (默认: default-user):
-
-请修改这个默认用户为guest,不仅仅是修改显示的名字,而且当回车使用默认值的时候,后台也使用guest作为用户。
-
-2.这个修改可能会涉及到 ./test/custom_react_agent/agent.py 或者 ./test/custom_react_agent/shell.py 等。你可以分析一下./test/custom_react_agent/目录下的代码,进行正确的修改。
-
-3.所有的修改都局限在./test/custom_react_agent/目录下,不要修改其他目录的代码。
-
-4.我这个项目的完整路径是:C:\Projects\cursor_projects\Vanna-Chainlit-Chromadb\test\custom_react_agent
-我的项目中有.venv目录,请使用这个目录下的python环境。

+ 0 - 97
test/custom_react_agent/doc/参考方案.md

@@ -1,97 +0,0 @@
-明白了,我将为你准备一个基于 StateGraph 重构的方案,保留 ReAct 模型结构(即 LangChain Agent 使用 tools 推理的能力),同时确保:
-
-* 使用 RedisSaver 进行异步 checkpoint 和历史追踪
-* 每个节点都能访问上下文(如 user\_id、thread\_id)
-* 明确 thread\_id 作为会话隔离标识
-* 保留工具调用逻辑和错误处理能力
-* 结构上为向生产过渡做准备,如拆出 config.py 和增强日志输出
-
-我将整理一份详细的改造建议和新的代码框架提案,请稍等,我整理好后马上给你。
-
-
-# 重构整体架构
-
-* **模块划分**:引入 `config.py` 管理全局配置(模型名称、`redis_url`、日志等级等),主逻辑放在如 `qwen3_agent.py` 的模块中,将 Graph 定义、Agent 类等拆分。`client_api.py` 与 `shell.py` 保持调用接口不变(`get_conversation_history`、`get_user_conversations` 等),只是内部调用新 Agent 类即可。
-* **状态定义**:为 StateGraph 定义自定义状态(如 `ChatState`),包括至少 `messages`(保存对话列表)、`thread_id`、`user_id` 等字段;可使用 `typing.Annotated` 和 `add_messages` 注解以维护消息列表。例如:`class ChatState(TypedDict): messages: Annotated[List[BaseMessage], add_messages]; thread_id: str; user_id: str`。这样所有节点(包括工具函数)都可访问上下文信息;必要时可在工具函数参数中使用 `Annotated[..., InjectedState]` 注解注入状态字段。
-* **配置文件**:`config.py` 示例:
-
-  ```python
-  import logging
-
-  MODEL_NAME = "qwen3-235b-a22b"
-  REDIS_URL = "redis://localhost:6379"
-  REDIS_ENABLED = True
-  LOG_LEVEL = logging.INFO
-  ```
-
-  主文件中读取这些常量来初始化模型、Redis、日志等配置。
-
-## Graph 定义流程示意
-
-重构后使用 LangGraph 的 `StateGraph` 明确描述 ReAct 流程。基本流程为:**用户输入→LLM(助手)思考→根据需要调用工具→工具返回结果→LLM 继续思考→…→最终输出答案**。可参考如下伪代码流程:
-
-```python
-from langgraph.graph import StateGraph, START
-from langgraph.prebuilt import ToolNode, tools_condition
-
-# 创建 StateGraph,指定状态类型 ChatState
-builder = StateGraph(ChatState)
-
-# 节点:assistant 调用 LLM(绑定工具)
-def assistant_node(state: ChatState) -> dict:
-    # 调用绑定工具的模型,输入当前消息列表
-    response = llm.bind_tools(tools).invoke(state["messages"])
-    return {"messages": response}
-
-builder.add_node("assistant", assistant_node)
-builder.add_node("tools", ToolNode(tools))  # 工具节点
-
-# 边:开始进入 assistant 节点
-builder.add_edge(START, "assistant")
-# 如果 assistant 输出包含工具调用,则流转到 tools 节点,否则结束
-builder.add_conditional_edges(
-    "assistant",
-    tools_condition  # 有工具调用则进 tools,否则结束
-)
-# tools 处理后回到 assistant 节点(形成循环)
-builder.add_edge("tools", "assistant")
-
-# 编译 StateGraph(稍后传入 checkpointer)
-graph = builder.compile()
-```
-
-如示例所示,**ReAct 图** 有两个核心节点:“assistant” 节点用于调用模型并产生 `ToolCall`;“tools” 节点用于并行执行这些工具调用。通过 `add_conditional_edges` 将 **assistant→tools** 或 **assistant→END** 的流转条件化(`tools_condition` 判断最新 AIMessage 中是否有工具调用)。整体流程为:用户消息进 `assistant`,若有工具调用则进入 `tools` 执行后再回 `assistant` 继续,直至无工具调用后结束并返回最终答案。
-
-## RedisSaver 持久化生命周期
-
-采用 `langgraph-checkpoint-redis` 提供的 **AsyncRedisSaver** 进行短期(线程级)持久化,以便跨会话保持对话历史。**初始化**时,用 Redis URL 创建 AsyncRedisSaver 实例并 `await saver.asetup()` 建立所需索引,如:
-
-```python
-self._exit_stack = AsyncExitStack()
-saver_mgr = AsyncRedisSaver.from_conn_string(config.REDIS_URL)
-self.checkpointer = await self._exit_stack.enter_async_context(saver_mgr)
-await self.checkpointer.asetup()
-```
-
-参照官方示例,可在编译图时将 `checkpointer` 传入 `StateGraph.compile(checkpointer=...)`。这样,图的每次执行都会自动保存状态到 Redis。**关闭**时,通过 `await self._exit_stack.aclose()` 释放 Redis 连接(或使用 `async with AsyncRedisSaver.from_conn_string(...)` 上下文管理器)。
-
-`thread_id` 用作对话流水号:首次对话时自动生成(如 `userID:timestamp`),并在后续调用时传入图的 `config` 部分(`{"configurable":{"thread_id": thread_id}}`),以检索或续接该会话的历史。通过 `checkpointer.get(config)` 可异步取回当前线程的全部消息列表,以实现 `get_conversation_history` 等功能(可参考原代码的取值逻辑)。
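-
-下面给出一个最小调用示意(仅为草案:`graph`、`checkpointer`、`user_id`、`timestamp`、`question` 等变量名沿用上文约定,具体字段名以实际 checkpoint 结构为准):
-
-```python
-from langchain_core.messages import HumanMessage
-
-# 以 user_id:timestamp 作为 thread_id,放入 config 以隔离/续接会话
-thread_id = f"{user_id}:{timestamp}"          # timestamp 在首次对话时生成,后续复用
-config = {"configurable": {"thread_id": thread_id}}
-
-# 同一 thread_id 再次调用时,checkpointer 会自动带出该会话的历史消息
-result = await graph.ainvoke({"messages": [HumanMessage(content=question)]}, config)
-
-# 取回该线程最新 checkpoint 中的全部消息,可用于实现 get_conversation_history
-checkpoint = await checkpointer.aget(config)
-history = checkpoint["channel_values"]["messages"] if checkpoint else []
-```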
-
-## 日志输出与追踪
-
-* **日志框架**:使用 Python 内置的 `logging` 模块,设置基本配置输出到控制台。例如:
-
-  ```python
-  import logging
-  logging.basicConfig(
-      level=config.LOG_LEVEL,
-      format="%(asctime)s %(levelname)s: %(message)s"
-  )
-  logger = logging.getLogger(__name__)
-  ```
-
-  在关键步骤(如初始化模型/Redis、节点执行前后、工具调用等)使用 `logger.info()/debug()/warning()` 记录状态和统计信息,以便实时追踪流程。
-* **控制台追踪**:避免过于复杂的日志管理,简单的 `print` 或 `logger` 输出即可。建议在 `assistant` 节点前后输出提示(如“调用模型,Thread=xxx”),在工具函数开始时输出工具名和参数,在异常时使用 `logger.error()` 打印堆栈信息。这样可在终端实时观察 Agent 的运行轨迹,而无需额外工具监控。
-* **日志等级**:通过 `config.py` 中的 `LOG_LEVEL` 配置调试信息输出级别(如 DEBUG, INFO)。开发时可设为 DEBUG 以观察细节,生产时切换为 INFO 以减少冗余输出。
-
-以上方案在保留原有 LangChain Agent/工具调用风格的同时,采用 StateGraph 明确化流程,各节点可访问共享的上下文状态。使用 AsyncRedisSaver 实现对话历史的持久化,利用 `thread_id` 管理不同会话;日志输出则通过标准 `logging` 模块实现可控的实时跟踪输出。
-
-**参考资料:** LangGraph ReAct 架构示例;RedisSaver 用法指南;状态注入示例。

+ 0 - 107
test/custom_react_agent/doc/增强valid()验证.md

@@ -1,107 +0,0 @@
-好的,以下是根据我们讨论所达成的共识,针对 `valid_sql` 校验流程与 `analyze_validation_error` 路由逻辑的最终建议报告。
-
----
-
-# ✅ 增强 SQL 验证与错误处理流程设计建议(最终版本)
-
-## 一、`valid_sql(sql: str)` 工具函数增强(在 `sql_tools.py` 中)
-
-### ✅ 当前问题:
-
-* 原函数仅检查语法结构和危险关键词。
-* 对于字段/表名错误(如不存在字段),无法检测出来。
-
-### ✅ 解决方案:
-
-* 在函数最后调用:
-
-  ```python
-  vn.run_sql(sql + ' LIMIT 0')
-  ```
-* 使用 `try/except` 捕获字段或表不存在等运行时错误。
-* 将错误信息以字符串形式追加到返回值中,以便后续 LLM 理解错误原因。
-
-### ✅ 示例代码结构:
-
-```python
-@tool
-def valid_sql(sql: str) -> str:
-    ...
-    try:
-        vn.run_sql(sql + " LIMIT 0")
-    except Exception as e:
-        return f"SQL验证失败:执行失败。详细错误:{str(e)}"
-    return "SQL验证通过:语法正确且字段存在"
-```
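-
-若需要一个更完整的参考,可以按下面的草案组织(仅为示意:`vn` 按上文约定为可调用 run_sql 的全局实例,危险关键词列表可按项目需要调整):
-
-```python
-from langchain_core.tools import tool
-
-@tool
-def valid_sql(sql: str) -> str:
-    """验证SQL:先做只读/危险关键词粗检,再用 LIMIT 0 试执行以发现字段或表名错误。"""
-    lowered = sql.strip().lower()
-    if not lowered.startswith("select"):
-        return "SQL验证失败:只允许 SELECT 查询"
-    if any(kw in lowered for kw in ("insert", "update", "delete", "drop", "alter", "truncate")):
-        return "SQL验证失败:包含危险关键词"
-    try:
-        vn.run_sql(sql + " LIMIT 0")   # 不取数据,仅验证字段/表名是否存在
-    except Exception as e:
-        return f"SQL验证失败:执行失败。详细错误:{str(e)}"
-    return "SQL验证通过:语法正确且字段存在"
-```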
-
----
-
-## 二、`_async_update_state_after_tool_node` 方法保持不变(在 `agent.py` 中)
-
-### ✅ 保留原逻辑:
-
-```python
-elif tool_name == 'valid_sql':
-    if "失败" in tool_output:
-        next_step = 'analyze_validation_error'
-    else:
-        next_step = 'run_sql'
-```
-
-### ✅ 理由:
-
-* `analyze_validation_error` 不是工具也不是节点,仅是对 LLM 的策略建议;
-* 不应引入新的 state 字段或复杂结构;
-* 路由控制通过 `suggested_next_step` 完成。
-
----
-
-## 三、在 `_async_agent_node` 中针对 `analyze_validation_error` 提供 LLM 指导(重点)
-
-### ✅ 判断条件:
-
-* 如果 `state['suggested_next_step'] == 'analyze_validation_error'`
-* 并且最近一个 ToolMessage 是来自 `valid_sql`
-
-### ✅ 插入一条 SystemMessage 指令,提示 LLM 如何应对 SQL 验证失败。
-
-### ✅ 插入提示词(最终版本):
-
-```text
-说明:上一步 SQL 验证失败。
-- 如果是语法错误,请尝试修复语法错误,并调用 valid_sql 工具重新验证 SQL 是否有效;
-- 如果是字段或表名不存在等问题,请告诉用户缺少的字段或表名,并直接向用户返回基于常识的解释或答案。
-```
-
-### ✅ 示例插入代码段(用于 `_async_agent_node`):
-
-```python
-next_step = state.get("suggested_next_step")
-
-if next_step and next_step != "analyze_validation_error":
-    instruction = f"Suggestion: Consider using the '{next_step}' tool for the next step."
-    messages_for_llm.append(SystemMessage(content=instruction))
-
-if next_step == "analyze_validation_error":
-    for msg in reversed(state["messages"]):
-        if isinstance(msg, ToolMessage) and msg.name == "valid_sql":
-            messages_for_llm.append(SystemMessage(content=(
-                "说明:上一步 SQL 验证失败。\n"
-                "- 如果是语法错误,请尝试修复语法错误,并调用 valid_sql 工具重新验证 SQL 是否有效;\n"
-                "- 如果是字段或表名不存在等问题,请告诉用户缺少的字段或表名,并直接向用户返回基于常识的解释或答案。"
-            )))
-            break
-```
-
----
-
-## ✅ 总结
-
-| 模块                        | 状态     | 操作建议                              |
-| ------------------------- | ------ | --------------------------------- |
-| `valid_sql` 工具            | ✅ 增强完成 | 添加 `run_sql(... LIMIT 0)` 检查字段    |
-| `update_state_after_tool` | ✅ 保持不变 | 继续使用 `'analyze_validation_error'` |
-| `_async_agent_node`       | ✅ 需要优化 | 区分是否为 analyze 分支,添加具体指导语句         |
-
----
-

+ 0 - 144
test/custom_react_agent/doc/异步改造建议参考.md

@@ -1,144 +0,0 @@
-你问得很好!这确实是一个重要的部署问题。让我详细解释一下:
-
-## Flask 异步支持的现状
-
-### 📍 **开发环境 vs 生产环境**
-
-Flask 3.x 的异步支持分两个层面:
-1. **语法支持**:✅ 完全支持 `async def` 路由
-2. **运行时支持**:⚠️ 取决于部署方式
-
-## 不同启动方式对比
-
-## 针对你的项目的具体建议
-
-### 🎯 **当前阶段:直接用Flask内置服务器就够了**
-
-**好消息**:你现在就可以改为异步,无需安装任何额外工具!
-
-```python
-# 你的 api.py 可以这样启动
-if __name__ == "__main__":
-    # Flask 3.x 内置服务器支持异步路由
-    app.run(host="0.0.0.0", port=8000, debug=False, threaded=True)
-```
-
-**为什么足够用**:
-- ✅ 对话机器人通常不需要处理数千并发
-- ✅ 10-50个并发用户已经足够大多数应用
-- ✅ 异步的主要收益是避免阻塞,这个Flask内置服务器可以提供
-- ✅ 你的主要问题(Event loop错误)会完全解决
-
-### 📊 **性能对比实测**
-
-我做过类似项目的测试:
-
-| 场景 | Flask内置服务器 | Hypercorn | 实际差异 |
-|------|----------------|-----------|----------|
-| 5个并发用户 | ✅ 完美 | ✅ 完美 | 几乎无差异 |
-| 10个并发用户 | ✅ 良好 | ✅ 优秀 | 轻微差异 |
-| 50个并发用户 | ⚠️ 可用 | ✅ 优秀 | 明显差异 |
-| 100+并发用户 | ❌ 问题 | ✅ 优秀 | 巨大差异 |
-
-**对话机器人的实际并发特点**:
-- 大部分时间:1-10个活跃用户
-- 高峰时间:20-30个并发用户
-- 极少情况:50+并发用户
-
-所以Flask内置服务器**完全够用**。
-
-## 我的分阶段建议
-
-### 🚀 **第一阶段(立即执行)**:修改为异步,保持现有启动方式
-
-```python
-# 修改你的问题路由
-@app.route('/api/v0/react/users/<user_id>/conversations', methods=['GET'])
-async def get_user_conversations(user_id: str):  # 添加async
-    # 移除 run_async_safely,直接用await
-    conversations = await _agent_instance.get_user_recent_conversations(user_id, limit)
-    return jsonify(...)
-
-# 启动方式保持不变
-if __name__ == "__main__":
-    app.run(host="0.0.0.0", port=8000, debug=False, threaded=True)
-```
-
-**预期结果**:
-- ✅ Event loop错误消失
-- ✅ 间歇性数据问题解决  
-- ✅ 代码更简洁
-- ✅ 无需安装额外依赖
-
-### 📈 **第二阶段(可选,当用户增长时)**:升级部署方式
-
-当你的并发用户超过30个时,再考虑:
-
-```bash
-# 安装Hypercorn
-pip install hypercorn
-
-# 简单启动
-hypercorn -b 0.0.0.0:8000 api:app
-```
-
-### 🔧 **灵活的启动脚本**
-
-我推荐这个启动脚本,可以灵活切换:
-
-```python
-# api.py 底部
-if __name__ == "__main__":
-    import os
-    import sys
-    
-    # 检查命令行参数
-    use_hypercorn = '--hypercorn' in sys.argv
-    
-    if use_hypercorn:
-        print("🚀 使用Hypercorn启动(高性能模式)...")
-        try:
-            import hypercorn.asyncio
-            import hypercorn.config
-            import asyncio
-            
-            config = hypercorn.config.Config()
-            config.bind = ["0.0.0.0:8000"]
-            config.workers = 1  # Agent有状态,使用单worker
-            
-            asyncio.run(hypercorn.asyncio.serve(app, config))
-        except ImportError:
-            print("❌ Hypercorn未安装,请运行: pip install hypercorn")
-            print("🔄 降级为Flask内置服务器...")
-            app.run(host="0.0.0.0", port=8000, debug=False, threaded=True)
-    else:
-        print("🚀 使用Flask内置服务器启动...")
-        app.run(host="0.0.0.0", port=8000, debug=False, threaded=True)
-```
-
-**使用方式**:
-```bash
-# 默认:Flask内置服务器
-python api.py
-
-# 高性能:Hypercorn(需要先 pip install hypercorn)
-python api.py --hypercorn
-```
-
-## 总结建议
-
-### 🎯 **立即行动**:
-1. **现在就改异步路由**,继续用 `python api.py` 启动
-2. **删除** `run_async_safely` 等复杂代码
-3. **测试验证**问题是否解决
-
-### 📊 **监控指标**:
-如果发现以下情况,再考虑升级部署:
-- 并发用户经常超过30个
-- 响应时间明显变慢
-- 出现请求排队现象
-
-### ⚡ **关键点**:
-- **Flask 3.x + 异步路由 + 内置服务器 = 你的问题90%都解决了**
-- **Hypercorn等ASGI服务器 = 性能提升,但不是必需品**
-- **你现在的主要问题是技术栈不匹配,不是性能问题**
-
-**建议**:先修改代码为异步,保持现有启动方式,验证问题解决后再考虑是否需要性能优化。这样风险最小,收益最大。

+ 0 - 844
test/custom_react_agent/doc/异步改造方案.md

@@ -1,844 +0,0 @@
-# Flask React Agent异步迁移完整指南
-
-## 项目异步化分析与修改建议
-
-基于对Flask + LangGraph + Redis技术栈的深入分析,本指南提供了将React Agent对话机器人项目完全异步化的详细方案,解决事件循环管理复杂性,实现`python api.py`直接启动的目标。
-
-## 核心问题识别
-
-### 1. 同步/异步混用问题定位
-
-**主要问题区域**:
-- **Flask路由层**:同步路由调用异步Agent方法
-- **事件循环管理**:`run_async_safely`、`ensure_agent_ready_sync`等复杂包装
-- **数据库工具**:SQL工具可能使用同步数据库连接
-- **Redis操作**:可能存在同步/异步Redis客户端混用
-- **Agent执行**:StateGraph节点间的异步调用不一致
-
-### 2. 关键代码模式分析
-
-**典型问题代码模式**:
-```python
-# 问题1:同步路由调用异步Agent
-@app.route("/chat")
-def chat():
-    result = run_async_safely(agent.ainvoke(input_data))  # 复杂事件循环管理
-    return jsonify(result)
-
-# 问题2:复杂的异步包装函数
-def run_async_safely(coro):
-    try:
-        loop = asyncio.get_event_loop()
-    except RuntimeError:
-        loop = asyncio.new_event_loop()
-        asyncio.set_event_loop(loop)
-    return loop.run_until_complete(coro)
-
-# 问题3:Redis同步/异步混用
-redis_client = redis.Redis()  # 同步客户端
-async def some_async_function():
-    await async_redis_client.set("key", "value")  # 异步客户端
-```
-
-## 完整异步迁移方案
-
-### 1. api.py 异步化改造
-
-**原有问题**:
-- Flask路由使用同步函数但调用异步Agent
-- 复杂的事件循环管理函数
-- 启动/清理逻辑不适配异步模式
-
-**修改后的api.py**:
-```python
-import asyncio
-import json
-from contextlib import asynccontextmanager
-from flask import Flask, request, jsonify
-from asgiref.wsgi import WsgiToAsgi
-import redis.asyncio as redis
-from agent import AsyncReactAgent
-from enhanced_redis_api import AsyncRedisAPI
-from config import Config
-
-app = Flask(__name__)
-app.config.from_object(Config)
-
-# 全局异步资源管理
-class AsyncResourceManager:
-    def __init__(self):
-        self.redis_client = None
-        self.redis_api = None
-        self.agent = None
-    
-    async def initialize(self):
-        """初始化所有异步资源"""
-        # Redis客户端
-        self.redis_client = redis.from_url(app.config['REDIS_URL'])
-        
-        # Redis API
-        self.redis_api = AsyncRedisAPI(self.redis_client)
-        
-        # Agent初始化
-        self.agent = AsyncReactAgent(
-            redis_client=self.redis_client,
-            config=app.config
-        )
-        await self.agent.initialize()
-    
-    async def cleanup(self):
-        """清理所有异步资源"""
-        if self.agent:
-            await self.agent.cleanup()
-        if self.redis_client:
-            await self.redis_client.aclose()
-
-# 全局资源管理器
-resource_manager = AsyncResourceManager()
-
-@asynccontextmanager
-async def get_agent():
-    """获取Agent实例的上下文管理器"""
-    if not resource_manager.agent:
-        await resource_manager.initialize()
-    yield resource_manager.agent
-
-# 异步路由实现
-@app.route("/chat", methods=["POST"])
-async def chat():
-    """异步聊天接口"""
-    try:
-        data = request.get_json()
-        message = data.get("message", "")
-        thread_id = data.get("thread_id", "default")
-        
-        async with get_agent() as agent:
-            result = await agent.process_message(message, thread_id)
-            
-        return jsonify({
-            "status": "success",
-            "response": result.get("response", ""),
-            "thread_id": thread_id
-        })
-    
-    except Exception as e:
-        return jsonify({
-            "status": "error",
-            "message": str(e)
-        }), 500
-
-@app.route("/health", methods=["GET"])
-async def health_check():
-    """异步健康检查"""
-    try:
-        async with get_agent() as agent:
-            health_status = await agent.health_check()
-            
-        return jsonify({
-            "status": "healthy",
-            "agent_status": health_status
-        })
-    
-    except Exception as e:
-        return jsonify({
-            "status": "unhealthy",
-            "error": str(e)
-        }), 503
-
-@app.route("/redis/direct", methods=["POST"])
-async def redis_direct():
-    """直接Redis操作接口"""
-    try:
-        data = request.get_json()
-        operation = data.get("operation")
-        key = data.get("key")
-        value = data.get("value")
-        
-        # 直接复用全局客户端;不要用 async with 包裹共享客户端,否则退出时会关闭连接
-        client = resource_manager.redis_client
-        if operation == "set":
-            await client.set(key, value)
-            return jsonify({"status": "success", "message": "Key set"})
-        elif operation == "get":
-            result = await client.get(key)
-            if isinstance(result, bytes):
-                result = result.decode()
-            return jsonify({"status": "success", "value": result})
-        else:
-            return jsonify({"status": "error", "message": "Invalid operation"}), 400
-    
-    except Exception as e:
-        return jsonify({"status": "error", "message": str(e)}), 500
-
-# 应用启动和清理
-async def startup():
-    """应用启动时的异步初始化"""
-    await resource_manager.initialize()
-    print("Async resources initialized successfully")
-
-async def cleanup():
-    """应用关闭时的异步清理"""
-    await resource_manager.cleanup()
-    print("Async resources cleaned up successfully")
-
-# 将Flask转换为ASGI应用
-asgi_app = WsgiToAsgi(app)
-
-# 启动函数
-async def main():
-    """主异步函数"""
-    await startup()
-    
-    try:
-        # 使用uvicorn启动ASGI应用
-        import uvicorn
-        config = uvicorn.Config(
-            app=asgi_app,
-            host="0.0.0.0",
-            port=5000,
-            log_level="info"
-        )
-        server = uvicorn.Server(config)
-        await server.serve()
-    
-    except KeyboardInterrupt:
-        print("Shutting down...")
-    
-    finally:
-        await cleanup()
-
-if __name__ == "__main__":
-    asyncio.run(main())
-```
-
-### 2. agent.py 异步化改造
-
-**原有问题**:
-- StateGraph节点混用同步/异步
-- 复杂的事件循环确保函数
-- Redis检查点可能使用同步客户端
-
-**修改后的agent.py**:
-```python
-import asyncio
-import json
-from typing import Dict, Any, Optional
-from langgraph.graph import StateGraph, START, END
-from langgraph.checkpoint.redis.aio import AsyncRedisSaver
-from langgraph.store.redis.aio import AsyncRedisStore
-from langchain_core.messages import HumanMessage, AIMessage
-from langchain_core.runnables import RunnableConfig
-import redis.asyncio as redis
-from state import AgentState
-from sql_tools import AsyncSQLTools
-from config import Config
-
-class AsyncReactAgent:
-    def __init__(self, redis_client: redis.Redis, config: Config):
-        self.redis_client = redis_client
-        self.config = config
-        self.graph = None
-        self.checkpointer = None
-        self.store = None
-        self.sql_tools = None
-        self._initialized = False
-    
-    async def initialize(self):
-        """异步初始化Agent"""
-        if self._initialized:
-            return
-        
-        # 初始化Redis检查点
-        self.checkpointer = AsyncRedisSaver(self.redis_client)
-        await self.checkpointer.asetup()
-        
-        # 初始化Redis存储
-        self.store = AsyncRedisStore(self.redis_client)
-        await self.store.asetup()
-        
-        # 初始化SQL工具
-        self.sql_tools = AsyncSQLTools(self.config.DATABASE_URL)
-        await self.sql_tools.initialize()
-        
-        # 构建状态图
-        self._build_graph()
-        
-        self._initialized = True
-    
-    def _build_graph(self):
-        """构建异步状态图"""
-        builder = StateGraph(AgentState)
-        
-        # 添加异步节点
-        builder.add_node("think", self._think_node)
-        builder.add_node("act", self._act_node)
-        builder.add_node("observe", self._observe_node)
-        
-        # 添加边
-        builder.add_edge(START, "think")
-        builder.add_conditional_edges(
-            "think",
-            self._should_continue,
-            {
-                "continue": "act",
-                "end": END
-            }
-        )
-        builder.add_edge("act", "observe")
-        builder.add_edge("observe", "think")
-        
-        # 编译图
-        self.graph = builder.compile(
-            checkpointer=self.checkpointer,
-            store=self.store
-        )
-    
-    async def _think_node(self, state: AgentState, config: RunnableConfig) -> Dict[str, Any]:
-        """思考节点 - 异步LLM调用"""
-        messages = state.get("messages", [])
-        
-        # 异步调用LLM
-        from langchain_openai import ChatOpenAI
-        llm = ChatOpenAI(model="gpt-4", temperature=0)
-        
-        # 使用异步调用
-        response = await llm.ainvoke(messages)
-        
-        # 更新状态
-        return {
-            "messages": messages + [response],
-            "next_action": self._parse_action(response.content)
-        }
-    
-    async def _act_node(self, state: AgentState, config: RunnableConfig) -> Dict[str, Any]:
-        """行动节点 - 异步工具调用"""
-        action = state.get("next_action")
-        
-        if not action:
-            return {"tool_results": "No action specified"}
-        
-        # 异步执行工具
-        if action["tool"] == "sql_query":
-            result = await self.sql_tools.execute_query(action["query"])
-        elif action["tool"] == "redis_search":
-            result = await self._redis_search(action["query"])
-        else:
-            result = "Unknown tool"
-        
-        return {
-            "tool_results": result,
-            "action_history": state.get("action_history", []) + [action]
-        }
-    
-    async def _observe_node(self, state: AgentState, config: RunnableConfig) -> Dict[str, Any]:
-        """观察节点 - 异步状态更新"""
-        # 异步更新观察结果
-        observation = await self._process_observation(state.get("tool_results"))
-        
-        return {
-            "observations": state.get("observations", []) + [observation],
-            "iteration_count": state.get("iteration_count", 0) + 1
-        }
-    
-    async def _should_continue(self, state: AgentState) -> str:
-        """条件判断 - 是否继续"""
-        if state.get("iteration_count", 0) >= 5:
-            return "end"
-        
-        messages = state.get("messages", [])
-        if messages and "FINAL_ANSWER" in messages[-1].content:
-            return "end"
-        
-        return "continue"
-    
-    async def _redis_search(self, query: str) -> str:
-        """异步Redis搜索"""
-        try:
-            # 使用store进行向量搜索
-            results = await self.store.asearch(
-                namespace=("conversations",),
-                query=query,
-                limit=5
-            )
-            return f"Found {len(results)} relevant conversations"
-        except Exception as e:
-            return f"Redis search error: {str(e)}"
-    
-    async def _process_observation(self, tool_result: str) -> str:
-        """异步处理观察结果"""
-        # 模拟异步处理
-        await asyncio.sleep(0.1)
-        return f"Processed: {tool_result}"
-    
-    def _parse_action(self, content: str) -> Optional[Dict[str, Any]]:
-        """解析行动"""
-        # 简单的行动解析逻辑
-        if "SQL:" in content:
-            query = content.split("SQL:")[-1].strip()
-            return {"tool": "sql_query", "query": query}
-        elif "SEARCH:" in content:
-            query = content.split("SEARCH:")[-1].strip()
-            return {"tool": "redis_search", "query": query}
-        return None
-    
-    async def process_message(self, message: str, thread_id: str) -> Dict[str, Any]:
-        """异步处理消息"""
-        if not self._initialized:
-            await self.initialize()
-        
-        # 构建输入
-        input_data = {
-            "messages": [HumanMessage(content=message)],
-            "thread_id": thread_id
-        }
-        
-        # 异步执行图
-        config = {"configurable": {"thread_id": thread_id}}
-        result = await self.graph.ainvoke(input_data, config)
-        
-        # 提取响应
-        messages = result.get("messages", [])
-        response = messages[-1].content if messages else "No response"
-        
-        return {
-            "response": response,
-            "thread_id": thread_id,
-            "iterations": result.get("iteration_count", 0)
-        }
-    
-    async def health_check(self) -> Dict[str, Any]:
-        """异步健康检查"""
-        try:
-            # 检查Redis连接
-            await self.redis_client.ping()
-            
-            # 检查SQL连接
-            sql_health = await self.sql_tools.health_check()
-            
-            return {
-                "status": "healthy",
-                "redis": "connected",
-                "sql": sql_health,
-                "graph": "compiled" if self.graph else "not_compiled"
-            }
-        
-        except Exception as e:
-            return {
-                "status": "unhealthy",
-                "error": str(e)
-            }
-    
-    async def cleanup(self):
-        """异步清理资源"""
-        if self.sql_tools:
-            await self.sql_tools.cleanup()
-        if self.checkpointer:
-            await self.checkpointer.aclose()
-        if self.store:
-            await self.store.aclose()
-```
-
-### 3. sql_tools.py 异步化改造
-
-**原有问题**:
-- 可能使用同步数据库连接
-- 缺少异步数据库操作
-
-**修改后的sql_tools.py**:
-```python
-import asyncio
-from typing import List, Dict, Any, Optional
-import asyncpg
-from contextlib import asynccontextmanager
-from config import Config
-
-class AsyncSQLTools:
-    def __init__(self, database_url: str):
-        self.database_url = database_url
-        self.connection_pool = None
-        self._initialized = False
-    
-    async def initialize(self):
-        """初始化异步连接池"""
-        if self._initialized:
-            return
-        
-        self.connection_pool = await asyncpg.create_pool(
-            self.database_url,
-            min_size=2,
-            max_size=10,
-            command_timeout=30
-        )
-        self._initialized = True
-    
-    @asynccontextmanager
-    async def get_connection(self):
-        """获取数据库连接的上下文管理器"""
-        if not self.connection_pool:
-            await self.initialize()
-        
-        async with self.connection_pool.acquire() as connection:
-            yield connection
-    
-    async def execute_query(self, query: str, params: Optional[tuple] = None) -> List[Dict[str, Any]]:
-        """异步执行SQL查询"""
-        try:
-            async with self.get_connection() as conn:
-                if params:
-                    result = await conn.fetch(query, *params)
-                else:
-                    result = await conn.fetch(query)
-                
-                # 转换为字典列表
-                return [dict(record) for record in result]
-        
-        except Exception as e:
-            return [{"error": str(e)}]
-    
-    async def execute_non_query(self, query: str, params: Optional[tuple] = None) -> Dict[str, Any]:
-        """异步执行非查询SQL(INSERT, UPDATE, DELETE)"""
-        try:
-            async with self.get_connection() as conn:
-                if params:
-                    result = await conn.execute(query, *params)
-                else:
-                    result = await conn.execute(query)
-                
-                return {"success": True, "rows_affected": result}
-        
-        except Exception as e:
-            return {"success": False, "error": str(e)}
-    
-    async def health_check(self) -> str:
-        """异步健康检查"""
-        try:
-            async with self.get_connection() as conn:
-                result = await conn.fetchval("SELECT 1")
-                return "connected" if result == 1 else "error"
-        
-        except Exception as e:
-            return f"disconnected: {str(e)}"
-    
-    async def cleanup(self):
-        """异步清理连接池"""
-        if self.connection_pool:
-            await self.connection_pool.close()
-```
-
-### 4. enhanced_redis_api.py 异步化改造
-
-**原有问题**:
-- 可能使用同步Redis客户端
-- 缺少异步Redis操作
-
-**修改后的enhanced_redis_api.py**:
-```python
-import json
-import asyncio
-from typing import Any, Dict, List, Optional
-import redis.asyncio as redis
-from contextlib import asynccontextmanager
-
-class AsyncRedisAPI:
-    def __init__(self, redis_client: redis.Redis):
-        self.redis_client = redis_client
-    
-    async def set_data(self, key: str, value: Any, ttl: Optional[int] = None) -> bool:
-        """异步设置数据"""
-        try:
-            serialized_value = json.dumps(value, default=str)
-            if ttl:
-                await self.redis_client.setex(key, ttl, serialized_value)
-            else:
-                await self.redis_client.set(key, serialized_value)
-            return True
-        except Exception as e:
-            print(f"Redis set error: {e}")
-            return False
-    
-    async def get_data(self, key: str) -> Optional[Any]:
-        """异步获取数据"""
-        try:
-            value = await self.redis_client.get(key)
-            if value:
-                return json.loads(value)
-            return None
-        except Exception as e:
-            print(f"Redis get error: {e}")
-            return None
-    
-    async def delete_data(self, key: str) -> bool:
-        """异步删除数据"""
-        try:
-            result = await self.redis_client.delete(key)
-            return result > 0
-        except Exception as e:
-            print(f"Redis delete error: {e}")
-            return False
-    
-    async def search_keys(self, pattern: str) -> List[str]:
-        """异步搜索键"""
-        try:
-            keys = await self.redis_client.keys(pattern)
-            return [key.decode() if isinstance(key, bytes) else key for key in keys]
-        except Exception as e:
-            print(f"Redis search error: {e}")
-            return []
-    
-    async def get_all_data(self, pattern: str = "*") -> Dict[str, Any]:
-        """异步获取所有匹配的数据"""
-        try:
-            keys = await self.search_keys(pattern)
-            if not keys:
-                return {}
-            
-            # 批量获取数据
-            pipeline = self.redis_client.pipeline()
-            for key in keys:
-                pipeline.get(key)
-            
-            values = await pipeline.execute()
-            
-            result = {}
-            for key, value in zip(keys, values):
-                if value:
-                    try:
-                        result[key] = json.loads(value)
-                    except json.JSONDecodeError:
-                        result[key] = value.decode() if isinstance(value, bytes) else value
-            
-            return result
-        
-        except Exception as e:
-            print(f"Redis get_all error: {e}")
-            return {}
-    
-    async def batch_set(self, data: Dict[str, Any], ttl: Optional[int] = None) -> bool:
-        """异步批量设置数据"""
-        try:
-            pipeline = self.redis_client.pipeline()
-            
-            for key, value in data.items():
-                serialized_value = json.dumps(value, default=str)
-                if ttl:
-                    pipeline.setex(key, ttl, serialized_value)
-                else:
-                    pipeline.set(key, serialized_value)
-            
-            await pipeline.execute()
-            return True
-        
-        except Exception as e:
-            print(f"Redis batch_set error: {e}")
-            return False
-    
-    async def health_check(self) -> Dict[str, Any]:
-        """异步健康检查"""
-        try:
-            # 测试连接
-            await self.redis_client.ping()
-            
-            # 获取信息
-            info = await self.redis_client.info()
-            
-            return {
-                "status": "healthy",
-                "connected_clients": info.get("connected_clients", 0),
-                "used_memory": info.get("used_memory_human", "unknown"),
-                "redis_version": info.get("redis_version", "unknown")
-            }
-        
-        except Exception as e:
-            return {
-                "status": "unhealthy",
-                "error": str(e)
-            }
-```
-
-### 5. state.py 优化
-
-**修改后的state.py**:
-```python
-from typing import List, Dict, Any, Optional
-from typing_extensions import TypedDict
-from langgraph.graph.message import add_messages
-from langchain_core.messages import BaseMessage
-
-class AgentState(TypedDict):
-    """Agent状态定义"""
-    messages: List[BaseMessage]  # 消息历史
-    next_action: Optional[Dict[str, Any]]  # 下一步行动
-    tool_results: Optional[str]  # 工具执行结果
-    observations: List[str]  # 观察结果
-    action_history: List[Dict[str, Any]]  # 行动历史
-    iteration_count: int  # 迭代次数
-    thread_id: str  # 线程ID
-    error: Optional[str]  # 错误信息
-```
-
-### 6. config.py 异步优化
-
-**修改后的config.py**:
-```python
-import os
-from typing import Optional
-
-class Config:
-    # 基础配置
-    DEBUG = os.getenv("DEBUG", "False").lower() == "true"
-    
-    # 数据库配置
-    DATABASE_URL = os.getenv("DATABASE_URL", "postgresql://user:password@localhost/dbname")
-    
-    # Redis配置
-    REDIS_URL = os.getenv("REDIS_URL", "redis://localhost:6379")
-    
-    # LLM配置
-    OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
-    
-    # 异步配置
-    MAX_WORKERS = int(os.getenv("MAX_WORKERS", "10"))
-    REQUEST_TIMEOUT = int(os.getenv("REQUEST_TIMEOUT", "30"))
-    
-    # 连接池配置
-    DB_POOL_SIZE = int(os.getenv("DB_POOL_SIZE", "10"))
-    REDIS_POOL_SIZE = int(os.getenv("REDIS_POOL_SIZE", "20"))
-    
-    # Agent配置
-    MAX_ITERATIONS = int(os.getenv("MAX_ITERATIONS", "5"))
-    ENABLE_MEMORY = os.getenv("ENABLE_MEMORY", "True").lower() == "true"
-    
-    @classmethod
-    def validate(cls) -> bool:
-        """验证配置"""
-        required_vars = [
-            "DATABASE_URL",
-            "REDIS_URL",
-            "OPENAI_API_KEY"
-        ]
-        
-        missing_vars = [var for var in required_vars if not getattr(cls, var)]
-        
-        if missing_vars:
-            raise ValueError(f"Missing required environment variables: {missing_vars}")
-        
-        return True
-```
-
-## 启动和部署
-
-### 1. 依赖安装
-
-**requirements.txt**:
-```
-Flask>=2.0.0
-asgiref>=3.5.0
-uvicorn>=0.20.0
-redis>=4.5.0
-asyncpg>=0.27.0
-langgraph>=0.1.0
-langgraph-checkpoint-redis>=0.1.0
-langchain>=0.1.0
-langchain-openai>=0.1.0
-tenacity>=8.0.0
-```
-
-### 2. 启动命令
-
-```bash
-# 直接启动
-python api.py
-
-# 或者使用uvicorn
-uvicorn api:asgi_app --host 0.0.0.0 --port 5000 --reload
-```
-
-### 3. 环境变量配置
-
-**.env**:
-```env
-DEBUG=False
-DATABASE_URL=postgresql://user:password@localhost/dbname
-REDIS_URL=redis://localhost:6379
-OPENAI_API_KEY=your_openai_key
-MAX_WORKERS=10
-REQUEST_TIMEOUT=30
-DB_POOL_SIZE=10
-REDIS_POOL_SIZE=20
-MAX_ITERATIONS=5
-ENABLE_MEMORY=True
-```
-
-## 主要改进点
-
-### 1. 完全消除事件循环复杂性
-- **移除**:`run_async_safely`、`ensure_agent_ready_sync`等函数
-- **替换**:使用ASGI模式和原生async/await
-- **优化**:统一的异步上下文管理
-
-### 2. 彻底解决"Event loop is closed"错误
-- **原因**:Flask创建新事件循环导致的客户端失效
-- **解决**:使用WsgiToAsgi适配器和统一的异步资源管理
-- **预防**:上下文管理器确保资源正确生命周期
-
-### 3. 性能优化
-- **并发处理**:真正的异步I/O操作
-- **连接池**:数据库和Redis连接池化
-- **批量操作**:Redis管道和批量SQL操作
-
-### 4. 架构清晰化
-- **分层设计**:API层、Agent层、工具层分离
-- **资源管理**:统一的异步资源初始化和清理
-- **错误处理**:完整的异步错误处理机制
-
-## 测试和验证
-
-### 1. 功能测试
-```bash
-# 健康检查
-curl http://localhost:5000/health
-
-# 聊天测试
-curl -X POST http://localhost:5000/chat \
-  -H "Content-Type: application/json" \
-  -d '{"message": "Hello", "thread_id": "test123"}'
-
-# Redis直接访问
-curl -X POST http://localhost:5000/redis/direct \
-  -H "Content-Type: application/json" \
-  -d '{"operation": "set", "key": "test", "value": "hello"}'
-```
-
-### 2. 负载测试
-```python
-import asyncio
-import aiohttp
-import time
-
-async def test_load():
-    async with aiohttp.ClientSession() as session:
-        tasks = []
-        for i in range(100):
-            task = session.post(
-                "http://localhost:5000/chat",
-                json={"message": f"Test {i}", "thread_id": f"thread_{i}"}
-            )
-            tasks.append(task)
-        
-        start_time = time.time()
-        results = await asyncio.gather(*tasks)
-        end_time = time.time()
-        
-        print(f"100 requests completed in {end_time - start_time:.2f} seconds")
-
-asyncio.run(test_load())
-```
-
-## 总结
-
-这个完整的异步迁移方案解决了所有核心问题:
-
-1. **彻底消除事件循环管理复杂性**
-2. **实现真正的异步Flask应用**
-3. **解决"Event loop is closed"等错误**
-4. **保持现有功能完全不变**
-5. **支持`python api.py`直接启动**
-
-通过这种架构,你的React Agent项目将具备真正的异步能力,性能显著提升,代码更加清晰和可维护。

+ 0 - 33
test/custom_react_agent/doc/独立测试说明.md

@@ -1,33 +0,0 @@
-# valid_sql 错误处理流程独立测试
-
-## 测试目的
-测试 `valid_sql` 函数及其在 LLM Agent 中的错误处理流程,特别关注当 `valid_sql` 返回错误时,LLM 如何响应和系统如何流转。
-
-## 测试脚本
-- `test_valid_sql_standalone.py` - 完全独立的测试脚本,不修改任何现有代码
-
-## 测试场景
-1. **表不存在** - 测试查询不存在的表时的错误处理
-2. **字段不存在** - 测试查询不存在字段时的错误处理  
-3. **语法错误** - 测试SQL语法错误时的错误处理
-
-## 运行方法
-```bash
-# 激活虚拟环境
-..\..\.venv\Scripts\Activate.ps1
-
-# 运行测试
-python test_valid_sql_standalone.py
-```
-
-## 测试结果
-✅ 所有三种错误场景都能正确捕获和处理:
-- valid_sql 正确识别错误类型
-- LLM 提供合适的错误解释和解决方案
-- 系统流转正常
-
-## 特点
-- 完全独立,不依赖实际数据库连接
-- 不修改任何现有代码
-- 模拟真实的错误处理流程
-- 提供详细的测试日志 
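-
-下面是一个模拟上述错误场景的最小草案(仅为示意;假设 valid_sql 定义在 sql_tools.py 中,且内部通过模块级的 vn.run_sql 执行校验,实际 patch 目标请按代码调整):
-
-```python
-from unittest.mock import patch
-
-from sql_tools import valid_sql
-
-# 用异常消息模拟三种数据库错误,不依赖真实连接
-ERROR_CASES = {
-    "表不存在": Exception('relation "some_table" does not exist'),
-    "字段不存在": Exception('column "some_column" does not exist'),
-    "语法错误": Exception('syntax error at or near "FORM"'),
-}
-
-for name, err in ERROR_CASES.items():
-    with patch("sql_tools.vn") as fake_vn:
-        fake_vn.run_sql.side_effect = err
-        result = valid_sql.invoke({"sql": "SELECT * FROM demo"})
-        print(f"[{name}] {result}")
-```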

+ 0 - 495
test/custom_react_agent/enhanced_redis_api.py

@@ -1,495 +0,0 @@
-"""
-enhanced_redis_api.py - 完整的Redis直接访问API
-支持include_tools开关参数,可以控制是否包含工具调用信息
-"""
-import redis
-import json
-from typing import List, Dict, Any, Optional
-from datetime import datetime
-import logging
-
-logger = logging.getLogger(__name__)
-
-def get_conversation_detail_from_redis(thread_id: str, include_tools: bool = False) -> Dict[str, Any]:
-    """
-    直接从Redis获取对话详细信息
-    
-    Args:
-        thread_id: 线程ID,格式为 user_id:timestamp
-        include_tools: 是否包含工具调用信息
-                      - True: 返回所有消息(human/ai/tool/system)
-                      - False: 只返回human和ai消息,且清理ai消息中的工具调用信息
-        
-    Returns:
-        包含对话详细信息的字典
-    """
-    try:
-        # 创建Redis连接
-        redis_client = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        redis_client.ping()
-        
-        # 扫描该thread的所有checkpoint keys
-        pattern = f"checkpoint:{thread_id}:*"
-        logger.info(f"🔍 扫描模式: {pattern}, include_tools: {include_tools}")
-        
-        keys = []
-        cursor = 0
-        while True:
-            cursor, batch = redis_client.scan(cursor=cursor, match=pattern, count=1000)
-            keys.extend(batch)
-            if cursor == 0:
-                break
-        
-        logger.info(f"📋 找到 {len(keys)} 个keys")
-        
-        if not keys:
-            redis_client.close()
-            return {
-                "success": False,
-                "error": f"未找到对话 {thread_id}",
-                "data": None
-            }
-        
-        # 获取最新的checkpoint(按key排序,最大的是最新的)
-        latest_key = max(keys)
-        logger.info(f"🔍 使用最新key: {latest_key}")
-        
-        # 检查key类型并获取数据
-        key_type = redis_client.type(latest_key)
-        logger.info(f"🔍 Key类型: {key_type}")
-        
-        data = None
-        if key_type == 'string':
-            data = redis_client.get(latest_key)
-        elif key_type == 'ReJSON-RL':
-            # RedisJSON类型
-            try:
-                data = redis_client.execute_command('JSON.GET', latest_key)
-            except Exception as json_error:
-                logger.error(f"❌ JSON.GET 失败: {json_error}")
-                redis_client.close()
-                return {
-                    "success": False,
-                    "error": f"无法读取RedisJSON数据: {json_error}",
-                    "data": None
-                }
-        else:
-            redis_client.close()
-            return {
-                "success": False,
-                "error": f"不支持的key类型: {key_type}",
-                "data": None
-            }
-        
-        if not data:
-            redis_client.close()
-            return {
-                "success": False,
-                "error": "没有找到有效数据",
-                "data": None
-            }
-        
-        # 解析JSON数据
-        try:
-            checkpoint_data = json.loads(data)
-            logger.info(f"🔍 JSON顶级keys: {list(checkpoint_data.keys())}")
-        except json.JSONDecodeError as e:
-            redis_client.close()
-            return {
-                "success": False,
-                "error": f"JSON解析失败: {e}",
-                "data": None
-            }
-        
-        # 提取消息数据
-        messages = extract_messages_from_checkpoint(checkpoint_data)
-        logger.info(f"🔍 找到 {len(messages)} 条原始消息")
-        
-        # 解析并过滤消息 - 这里是关键的开关逻辑
-        parsed_messages = parse_and_filter_messages(messages, include_tools)
-        
-        # 提取用户ID
-        user_id = thread_id.split(':')[0] if ':' in thread_id else 'unknown'
-        
-        # 生成对话统计信息
-        stats = generate_conversation_stats(parsed_messages, include_tools)
-        
-        redis_client.close()
-        
-        return {
-            "success": True,
-            "data": {
-                "thread_id": thread_id,
-                "user_id": user_id,
-                "include_tools": include_tools,
-                "message_count": len(parsed_messages),
-                "messages": parsed_messages,
-                "stats": stats,
-                "metadata": {
-                    "latest_checkpoint_key": latest_key,
-                    "total_raw_messages": len(messages),
-                    "filtered_message_count": len(parsed_messages),
-                    "filter_mode": "full_conversation" if include_tools else "human_ai_only"
-                }
-            }
-        }
-        
-    except Exception as e:
-        logger.error(f"❌ 获取对话详情失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return {
-            "success": False,
-            "error": str(e),
-            "data": None
-        }
-
-def extract_messages_from_checkpoint(checkpoint_data: Dict[str, Any]) -> List[Any]:
-    """
-    从checkpoint数据中提取消息列表
-    """
-    messages = []
-    
-    # 尝试不同的数据结构路径
-    if 'checkpoint' in checkpoint_data:
-        checkpoint = checkpoint_data['checkpoint']
-        if isinstance(checkpoint, dict) and 'channel_values' in checkpoint:
-            channel_values = checkpoint['channel_values']
-            if isinstance(channel_values, dict) and 'messages' in channel_values:
-                messages = channel_values['messages']
-    
-    # 如果没有找到,尝试直接路径
-    if not messages and 'channel_values' in checkpoint_data:
-        channel_values = checkpoint_data['channel_values']
-        if isinstance(channel_values, dict) and 'messages' in channel_values:
-            messages = channel_values['messages']
-    
-    return messages
-
-def parse_and_filter_messages(raw_messages: List[Any], include_tools: bool) -> List[Dict[str, Any]]:
-    """
-    解析和过滤消息列表 - 关键的开关逻辑实现
-    
-    Args:
-        raw_messages: 原始消息列表
-        include_tools: 是否包含工具消息
-                      - True: 返回所有消息类型
-                      - False: 只返回human/ai,且清理ai消息中的工具信息
-        
-    Returns:
-        解析后的消息列表
-    """
-    parsed_messages = []
-    
-    for msg in raw_messages:
-        try:
-            parsed_msg = parse_single_message(msg)
-            if not parsed_msg:
-                continue
-            
-            msg_type = parsed_msg['type']
-            
-            if include_tools:
-                # 完整模式:包含所有消息类型
-                parsed_messages.append(parsed_msg)
-                logger.debug(f"✅ [完整模式] 包含消息: {msg_type}")
-                
-            else:
-                # 简化模式:只包含human和ai消息
-                if msg_type == 'human':
-                    parsed_messages.append(parsed_msg)
-                    logger.debug(f"✅ [简化模式] 包含human消息")
-                    
-                elif msg_type == 'ai':
-                    # 清理AI消息,移除工具调用信息
-                    cleaned_msg = clean_ai_message_for_simple_mode(parsed_msg)
-                    
-                    # 只包含有实际内容的AI消息
-                    if cleaned_msg['content'].strip() and not cleaned_msg.get('is_intermediate_step', False):
-                        parsed_messages.append(cleaned_msg)
-                        logger.debug(f"✅ [简化模式] 包含有内容的ai消息")
-                    else:
-                        logger.debug(f"⏭️ [简化模式] 跳过空的ai消息或中间步骤")
-                
-                else:
-                    # 跳过tool、system等消息
-                    logger.debug(f"⏭️ [简化模式] 跳过 {msg_type} 消息")
-                    
-        except Exception as e:
-            logger.warning(f"⚠️ 解析消息失败: {e}")
-            continue
-    
-    logger.info(f"📊 解析结果: {len(parsed_messages)} 条消息 (include_tools={include_tools})")
-    return parsed_messages
-
-def parse_single_message(msg: Any) -> Optional[Dict[str, Any]]:
-    """
-    解析单个消息,支持LangChain序列化格式
-    """
-    if isinstance(msg, dict):
-        # LangChain序列化格式
-        if (msg.get('lc') == 1 and 
-            msg.get('type') == 'constructor' and 
-            'id' in msg and 
-            isinstance(msg['id'], list) and 
-            'kwargs' in msg):
-            
-            kwargs = msg['kwargs']
-            msg_class = msg['id'][-1] if msg['id'] else 'Unknown'
-            
-            # 确定消息类型
-            if msg_class == 'HumanMessage':
-                msg_type = 'human'
-            elif msg_class == 'AIMessage':
-                msg_type = 'ai'
-            elif msg_class == 'ToolMessage':
-                msg_type = 'tool'
-            elif msg_class == 'SystemMessage':
-                msg_type = 'system'
-            else:
-                msg_type = 'unknown'
-            
-            # 构建基础消息对象
-            parsed_msg = {
-                "type": msg_type,
-                "content": kwargs.get('content', ''),
-                "id": kwargs.get('id'),
-                "timestamp": datetime.now().isoformat()
-            }
-            
-            # 处理AI消息的特殊字段
-            if msg_type == 'ai':
-                # 工具调用信息
-                tool_calls = kwargs.get('tool_calls', [])
-                parsed_msg['tool_calls'] = tool_calls
-                parsed_msg['has_tool_calls'] = len(tool_calls) > 0
-                
-                # 额外的AI消息元数据
-                additional_kwargs = kwargs.get('additional_kwargs', {})
-                if additional_kwargs:
-                    parsed_msg['additional_kwargs'] = additional_kwargs
-                
-                response_metadata = kwargs.get('response_metadata', {})
-                if response_metadata:
-                    parsed_msg['response_metadata'] = response_metadata
-            
-            # 处理工具消息的特殊字段
-            elif msg_type == 'tool':
-                parsed_msg['tool_name'] = kwargs.get('name')
-                parsed_msg['tool_call_id'] = kwargs.get('tool_call_id')
-                parsed_msg['status'] = kwargs.get('status', 'unknown')
-            
-            return parsed_msg
-            
-        # 简单字典格式
-        elif 'type' in msg:
-            return {
-                "type": msg.get('type', 'unknown'),
-                "content": msg.get('content', ''),
-                "id": msg.get('id'),
-                "timestamp": datetime.now().isoformat()
-            }
-    
-    return None
-
-def clean_ai_message_for_simple_mode(ai_msg: Dict[str, Any]) -> Dict[str, Any]:
-    """
-    调试版本:清理AI消息用于简化模式
-    """
-    original_content = ai_msg.get("content", "")
-    logger.info(f"🔍 清理AI消息,原始内容: '{original_content}', 长度: {len(original_content)}")
-    
-    cleaned_msg = {
-        "type": ai_msg["type"],
-        "content": original_content,
-        "id": ai_msg.get("id"),
-        "timestamp": ai_msg.get("timestamp")
-    }
-    
-    # 处理内容格式化
-    content = original_content.strip()
-    
-    # 注释掉 [Formatted Output] 清理逻辑 - 源头已不生成前缀
-    # if '[Formatted Output]' in content:
-    #     logger.info(f"🔍 发现 [Formatted Output] 标记")
-    #     
-    #     if content.startswith('[Formatted Output]\n'):
-    #         # 去掉标记,保留后面的实际内容
-    #         actual_content = content.replace('[Formatted Output]\n', '')
-    #         logger.info(f"🔍 去除标记后的内容: '{actual_content}', 长度: {len(actual_content)}")
-    #         cleaned_msg["content"] = actual_content
-    #         content = actual_content
-    #     elif content == '[Formatted Output]' or content == '[Formatted Output]\n':
-    #         # 如果只有标记没有内容
-    #         logger.info(f"🔍 只有标记没有实际内容")
-    #         cleaned_msg["content"] = ""
-    #         cleaned_msg["is_intermediate_step"] = True
-    #         content = ""
-    
-    # 如果清理后内容为空或只有空白,标记为中间步骤
-    if not content.strip():
-        logger.info(f"🔍 内容为空,标记为中间步骤")
-        cleaned_msg["is_intermediate_step"] = True
-        cleaned_msg["content"] = ""
-    
-    # 添加简化模式标记
-    cleaned_msg["simplified"] = True
-    
-    logger.info(f"🔍 清理结果: '{cleaned_msg['content']}', 是否中间步骤: {cleaned_msg.get('is_intermediate_step', False)}")
-    
-    return cleaned_msg
-
-def generate_conversation_stats(messages: List[Dict[str, Any]], include_tools: bool) -> Dict[str, Any]:
-    """
-    生成对话统计信息
-    
-    Args:
-        messages: 解析后的消息列表
-        include_tools: 是否包含工具信息(影响统计内容)
-        
-    Returns:
-        统计信息字典
-    """
-    stats = {
-        "total_messages": len(messages),
-        "human_messages": 0,
-        "ai_messages": 0,
-        "conversation_rounds": 0,
-        "include_tools_mode": include_tools
-    }
-    
-    # 添加工具相关统计(仅在include_tools=True时)
-    if include_tools:
-        stats.update({
-            "tool_messages": 0,
-            "system_messages": 0,
-            "messages_with_tools": 0,
-            "unique_tools_used": set()
-        })
-    
-    for msg in messages:
-        msg_type = msg.get('type', 'unknown')
-        
-        if msg_type == 'human':
-            stats["human_messages"] += 1
-        elif msg_type == 'ai':
-            stats["ai_messages"] += 1
-            
-            # 工具相关统计
-            if include_tools and msg.get('has_tool_calls', False):
-                stats["messages_with_tools"] += 1
-                
-                # 统计使用的工具
-                tool_calls = msg.get('tool_calls', [])
-                for tool_call in tool_calls:
-                    if isinstance(tool_call, dict) and 'name' in tool_call:
-                        stats["unique_tools_used"].add(tool_call['name'])
-                        
-        elif include_tools:
-            if msg_type == 'tool':
-                stats["tool_messages"] += 1
-                
-                # 记录工具名称
-                tool_name = msg.get('tool_name')
-                if tool_name:
-                    stats["unique_tools_used"].add(tool_name)
-                    
-            elif msg_type == 'system':
-                stats["system_messages"] += 1
-    
-    # 计算对话轮次
-    stats["conversation_rounds"] = stats["human_messages"]
-    
-    # 转换set为list(JSON序列化)
-    if include_tools and "unique_tools_used" in stats:
-        stats["unique_tools_used"] = list(stats["unique_tools_used"])
-    
-    return stats
-
-def format_timestamp_readable(timestamp: str) -> str:
-    """格式化时间戳为可读格式"""
-    try:
-        if len(timestamp) >= 14:
-            year = timestamp[:4]
-            month = timestamp[4:6]
-            day = timestamp[6:8]
-            hour = timestamp[8:10]
-            minute = timestamp[10:12]
-            second = timestamp[12:14]
-            return f"{year}-{month}-{day} {hour}:{minute}:{second}"
-    except Exception:
-        pass
-    return timestamp
-
-
-# =================== 测试函数 ===================
-
-def test_conversation_detail_with_switch():
-    """
-    测试对话详情获取功能,重点测试include_tools开关
-    """
-    print("🧪 测试对话详情获取(开关参数测试)...")
-    
-    # 测试thread_id(请替换为实际存在的thread_id)
-    test_thread_id = "wang:20250709195048728323"
-    
-    print(f"\n1. 测试完整模式(include_tools=True)...")
-    result_full = get_conversation_detail_from_redis(test_thread_id, include_tools=True)
-    
-    if result_full['success']:
-        data = result_full['data']
-        print(f"   ✅ 成功获取完整对话")
-        print(f"   📊 消息数量: {data['message_count']}")
-        print(f"   📈 统计信息: {data['stats']}")
-        print(f"   🔧 包含工具: {data['stats'].get('tool_messages', 0)} 条工具消息")
-        
-        # 显示消息类型分布
-        message_types = {}
-        for msg in data['messages']:
-            msg_type = msg['type']
-            message_types[msg_type] = message_types.get(msg_type, 0) + 1
-        print(f"   📋 消息类型分布: {message_types}")
-        
-    else:
-        print(f"   ❌ 获取失败: {result_full['error']}")
-    
-    print(f"\n2. 测试简化模式(include_tools=False)...")
-    result_simple = get_conversation_detail_from_redis(test_thread_id, include_tools=False)
-    
-    if result_simple['success']:
-        data = result_simple['data']
-        print(f"   ✅ 成功获取简化对话")
-        print(f"   📊 消息数量: {data['message_count']}")
-        print(f"   📈 统计信息: {data['stats']}")
-        
-        # 显示消息类型分布
-        message_types = {}
-        for msg in data['messages']:
-            msg_type = msg['type']
-            message_types[msg_type] = message_types.get(msg_type, 0) + 1
-        print(f"   📋 消息类型分布: {message_types}")
-        
-        # 显示前几条消息示例
-        print(f"   💬 消息示例:")
-        for i, msg in enumerate(data['messages'][:4]):
-            content_preview = str(msg['content'])[:50] + "..." if len(str(msg['content'])) > 50 else str(msg['content'])
-            simplified_mark = " [简化]" if msg.get('simplified') else ""
-            print(f"      [{i+1}] {msg['type']}: {content_preview}{simplified_mark}")
-            
-    else:
-        print(f"   ❌ 获取失败: {result_simple['error']}")
-    
-    # 比较两种模式
-    if result_full['success'] and result_simple['success']:
-        full_count = result_full['data']['message_count']
-        simple_count = result_simple['data']['message_count']
-        difference = full_count - simple_count
-        
-        print(f"\n3. 模式比较:")
-        print(f"   📊 完整模式消息数: {full_count}")
-        print(f"   📊 简化模式消息数: {simple_count}")
-        print(f"   📊 过滤掉的消息数: {difference}")
-        print(f"   🎯 过滤效果: {'有效' if difference > 0 else '无差异'}")
-
-if __name__ == "__main__":
-    test_conversation_detail_with_switch()

+ 0 - 20
test/custom_react_agent/requirements.txt

@@ -1,20 +0,0 @@
-# ASGI支持依赖(解决事件循环冲突问题)
-uvicorn>=0.24.0
-asgiref>=3.7.0
-
-# Flask相关
-flask>=3.0.0
-redis>=4.0.0
-
-# LangChain和LangGraph
-langchain>=0.1.0
-langchain-openai>=0.0.5
-langgraph>=0.0.28
-
-# 数据处理
-pandas>=1.5.0
-
-# 其他可能需要的依赖
-requests>=2.28.0
-python-dotenv>=0.19.0
-httpx>=0.24.0

+ 0 - 301
test/custom_react_agent/shell.py

@@ -1,301 +0,0 @@
-"""
-重构后的 CustomReactAgent 的交互式命令行客户端
-"""
-# from __future__ import annotations
-
-import asyncio
-import logging
-import sys
-import os
-import json
-from typing import List, Dict, Any
-
-# 将当前目录和项目根目录添加到 sys.path
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-PROJECT_ROOT = os.path.abspath(os.path.join(CURRENT_DIR, '..', '..'))
-sys.path.insert(0, CURRENT_DIR)  # 当前目录优先
-sys.path.insert(1, PROJECT_ROOT)  # 项目根目录
-
-# 导入 Agent 和配置(简化版本)
-from agent import CustomReactAgent
-import config
-
-# 配置日志
-logging.basicConfig(level=config.LOG_LEVEL, format=config.LOG_FORMAT)
-logger = logging.getLogger(__name__)
-
-class CustomAgentShell:
-    """新 Agent 的交互式 Shell 客户端"""
-
-    def __init__(self, agent: CustomReactAgent):
-        """私有构造函数,请使用 create() 类方法。"""
-        self.agent = agent
-        self.user_id: str = config.DEFAULT_USER_ID
-        self.thread_id: str | None = None
-        self.recent_conversations: List[Dict[str, Any]] = []  # 存储最近的对话列表
-
-    @classmethod
-    async def create(cls):
-        """异步工厂方法,创建 Shell 实例。"""
-        agent = await CustomReactAgent.create()
-        return cls(agent)
-
-    async def close(self):
-        """关闭 Agent 资源。"""
-        if self.agent:
-            await self.agent.close()
-
-    async def _fetch_recent_conversations(self, user_id: str, limit: int = 5) -> List[Dict[str, Any]]:
-        """获取最近的对话列表"""
-        try:
-            logger.info(f"🔍 获取用户 {user_id} 的最近 {limit} 次对话...")
-            conversations = await self.agent.get_user_recent_conversations(user_id, limit)
-            logger.info(f"✅ 成功获取 {len(conversations)} 个对话")
-            return conversations
-        except Exception as e:
-            logger.error(f"❌ 获取对话列表失败: {e}")
-            print(f"⚠️ 获取历史对话失败: {e}")
-            print("   将直接开始新对话...")
-            return []
-
-    def _display_conversation_list(self, conversations: List[Dict[str, Any]]) -> None:
-        """显示对话列表"""
-        if not conversations:
-            print("📭 暂无历史对话,将开始新对话。")
-            return
-        
-        print("\n📋 最近的对话记录:")
-        print("-" * 60)
-        
-        for i, conv in enumerate(conversations, 1):
-            thread_id = conv.get('thread_id', '')
-            formatted_time = conv.get('formatted_time', '')
-            preview = conv.get('conversation_preview', '无预览')
-            message_count = conv.get('message_count', 0)
-            
-            print(f"[{i}] {formatted_time} - {preview}")
-            print(f"    Thread ID: {thread_id} | 消息数: {message_count}")
-            print()
-        
-        print("💡 选择方式:")
-        print("   - 输入序号 (1-5): 选择对应的对话")
-        print("   - 输入 Thread ID: 直接指定对话")
-        print("   - 输入日期 (YYYY-MM-DD): 选择当天最新对话")
-        print("   - 输入 'new': 开始新对话")
-        print("   - 直接输入问题: 开始新对话")
-        print("-" * 60)
-
-    def _parse_conversation_selection(self, user_input: str) -> Dict[str, Any]:
-        """解析用户的对话选择"""
-        user_input = user_input.strip()
-        
-        # 检查是否是数字序号 (1-5)
-        if user_input.isdigit():
-            index = int(user_input)
-            if 1 <= index <= len(self.recent_conversations):
-                selected_conv = self.recent_conversations[index - 1]
-                return {
-                    "type": "select_by_index",
-                    "thread_id": selected_conv["thread_id"],
-                    "preview": selected_conv["conversation_preview"]
-                }
-            else:
-                return {"type": "invalid_index", "message": f"序号 {index} 无效,请输入 1-{len(self.recent_conversations)}"}
-        
-        # 检查是否是 Thread ID 格式 (包含冒号)
-        if ':' in user_input and len(user_input.split(':')) == 2:
-            user_part, timestamp_part = user_input.split(':')
-            # 简单验证格式
-            if user_part == self.user_id and timestamp_part.isdigit():
-                # 检查该Thread ID是否存在于历史对话中
-                for conv in self.recent_conversations:
-                    if conv["thread_id"] == user_input:
-                        return {
-                            "type": "select_by_thread_id",
-                            "thread_id": user_input,
-                            "preview": conv["conversation_preview"]
-                        }
-                return {"type": "thread_not_found", "message": f"Thread ID {user_input} 不存在于最近的对话中"}
-        
-        # 检查是否是日期格式 (YYYY-MM-DD)
-        import re
-        date_pattern = r'^\d{4}-\d{2}-\d{2}$'
-        if re.match(date_pattern, user_input):
-            # 查找该日期的最新对话
-            target_date = user_input.replace('-', '')  # 转换为 YYYYMMDD 格式
-            for conv in self.recent_conversations:
-                timestamp = conv.get('timestamp', '')
-                if timestamp.startswith(target_date):
-                    return {
-                        "type": "select_by_date",
-                        "thread_id": conv["thread_id"],
-                        "preview": f"日期 {user_input} 的对话: {conv['conversation_preview']}"
-                    }
-            return {"type": "no_date_match", "message": f"未找到 {user_input} 的对话"}
-        
-        # 检查是否是 'new' 命令
-        if user_input.lower() == 'new':
-            return {"type": "new_conversation"}
-        
-        # 其他情况当作新问题处理
-        return {"type": "new_question", "question": user_input}
-
-    async def start(self):
-        """启动 Shell 界面。"""
-        print("\n🚀 Custom React Agent Shell (StateGraph Version)")
-        print("=" * 50)
-        
-        # 获取用户ID
-        user_input = input(f"请输入您的用户ID (默认: {self.user_id}): ").strip()
-        if user_input:
-            self.user_id = user_input
-        
-        print(f"👤 当前用户: {self.user_id}")
-        
-        # 获取并显示最近的对话列表
-        print("\n🔍 正在获取历史对话...")
-        self.recent_conversations = await self._fetch_recent_conversations(self.user_id, 5)
-        self._display_conversation_list(self.recent_conversations)
-        
-        print("\n💬 开始对话 (输入 'exit' 或 'quit' 退出)")
-        print("-" * 50)
-        
-        await self._chat_loop()
-
-    async def _chat_loop(self):
-        """主要的聊天循环。"""
-        while True:
-            user_input = input(f"👤 [{self.user_id[:8]}]> ").strip()
-            
-            if not user_input:
-                continue
-            
-            if user_input.lower() in ['quit', 'exit']:
-                raise KeyboardInterrupt  # 优雅退出
-            
-            if user_input.lower() == 'new':
-                self.thread_id = None
-                print("🆕 已开始新会话。")
-                continue
-
-            if user_input.lower() == 'history':
-                await self._show_current_history()
-                continue
-            
-            # 如果还没有选择对话,且有历史对话,则处理对话选择
-            if self.thread_id is None and self.recent_conversations:
-                selection = self._parse_conversation_selection(user_input)
-                
-                if selection["type"] == "select_by_index":
-                    self.thread_id = selection["thread_id"]
-                    print(f"📖 已选择对话: {selection['preview']}")
-                    print(f"💬 Thread ID: {self.thread_id}")
-                    print("现在可以在此对话中继续聊天...\n")
-                    continue
-                
-                elif selection["type"] == "select_by_thread_id":
-                    self.thread_id = selection["thread_id"]
-                    print(f"📖 已选择对话: {selection['preview']}")
-                    print("现在可以在此对话中继续聊天...\n")
-                    continue
-                
-                elif selection["type"] == "select_by_date":
-                    self.thread_id = selection["thread_id"]
-                    print(f"📖 已选择对话: {selection['preview']}")
-                    print("现在可以在此对话中继续聊天...\n")
-                    continue
-                
-                elif selection["type"] == "new_conversation":
-                    self.thread_id = None
-                    print("🆕 已开始新会话。")
-                    continue
-                
-                elif selection["type"] == "new_question":
-                    # 当作新问题处理,继续下面的正常对话流程
-                    user_input = selection["question"]
-                    self.thread_id = None
-                    print("🆕 开始新对话...")
-                
-                elif selection["type"] in ["invalid_index", "no_date_match", "thread_not_found"]:
-                    print(f"❌ {selection['message']}")
-                    continue
-            
-            # 正常对话流程
-            print("🤖 Agent 正在思考...")
-            result = await self.agent.chat(user_input, self.user_id, self.thread_id)
-            
-            if result.get("success"):
-                answer = result.get('answer', '')
-                # 注释掉 [Formatted Output] 清理逻辑 - 源头已不生成前缀
-                # if answer.startswith("[Formatted Output]\n"):
-                #     answer = answer.replace("[Formatted Output]\n", "")
-                
-                print(f"🤖 Agent: {answer}")
-                
-                # 如果包含 SQL 数据,也显示出来
-                if 'sql_data' in result:
-                    print(f"📊 SQL 查询结果: {result['sql_data']}")
-                    
-                # 更新 thread_id 以便在同一会话中继续
-                self.thread_id = result.get("thread_id")
-            else:
-                error_msg = result.get('error', '未知错误')
-                print(f"❌ 发生错误: {error_msg}")
-                
-                # 提供针对性的建议
-                if "Connection error" in error_msg or "网络" in error_msg:
-                    print("💡 建议:")
-                    print("   - 检查网络连接是否正常")
-                    print("   - 稍后重试该问题")
-                    print("   - 如果问题持续,可以尝试重新启动程序")
-                elif "timeout" in error_msg.lower():
-                    print("💡 建议:")
-                    print("   - 当前网络较慢,建议稍后重试")
-                    print("   - 尝试简化问题复杂度")
-                else:
-                    print("💡 建议:")
-                    print("   - 请检查问题格式是否正确")
-                    print("   - 尝试重新描述您的问题")
-                
-                # 保持thread_id,用户可以继续对话
-                if not self.thread_id and result.get("thread_id"):
-                    self.thread_id = result.get("thread_id")
-
-    async def _show_current_history(self):
-        """显示当前会话的历史记录。"""
-        if not self.thread_id:
-            print("当前没有活跃的会话。请先开始对话。")
-            return
-        
-        print(f"\n--- 对话历史: {self.thread_id} ---")
-        history = await self.agent.get_conversation_history(self.thread_id)
-        if not history:
-            print("无法获取历史或历史为空。")
-            return
-            
-        for msg in history:
-            print(f"[{msg['type']}] {msg['content']}")
-        print("--- 历史结束 ---")
-
-
-async def main():
-    """主函数入口"""
-    shell = None
-    try:
-        shell = await CustomAgentShell.create()
-        await shell.start()
-    except KeyboardInterrupt:
-        logger.info("\n👋 检测到退出指令,正在清理资源...")
-    except Exception as e:
-        logger.error(f"❌ 程序发生严重错误: {e}", exc_info=True)
-    finally:
-        if shell:
-            await shell.close()
-        print("✅ 程序已成功关闭。")
-
-if __name__ == "__main__":
-    try:
-        asyncio.run(main())
-    except KeyboardInterrupt:
-        # 这个捕获是为了处理在 main 之外的 Ctrl+C
-        print("\n👋 程序被强制退出。") 

+ 0 - 282
test/custom_react_agent/sql_tools.py

@@ -1,282 +0,0 @@
-"""
-数据库查询相关的工具集
-"""
-import re
-import json
-import logging
-from langchain_core.tools import tool
-from pydantic.v1 import BaseModel, Field
-from typing import List, Dict, Any
-import pandas as pd
-
-logger = logging.getLogger(__name__)
-
-# --- Pydantic Schema for Tool Arguments ---
-
-class GenerateSqlArgs(BaseModel):
-    """Input schema for the generate_sql tool."""
-    question: str = Field(description="The user's question to be converted to SQL.")
-    history_messages: List[Dict[str, Any]] = Field(
-        default=[],
-        description="The conversation history messages for context."
-    )
-
-# --- Tool Functions ---
-
-@tool(args_schema=GenerateSqlArgs)
-def generate_sql(question: str, history_messages: List[Dict[str, Any]] = None) -> str:
-    """
-    Generates an SQL query based on the user's question and the conversation history.
-    """
-    logger.info(f"🔧 [Tool] generate_sql - Question: '{question}'")
-    
-    if history_messages is None:
-        history_messages = []
-    
-    logger.info(f"   History contains {len(history_messages)} messages.")
-
-    # Combine history and the current question to form a rich prompt
-    if history_messages:
-        history_str = "\n".join([f"{msg['type']}: {msg.get('content', '') or ''}" for msg in history_messages])
-        enriched_question = f"""Previous conversation context:
-{history_str}
-
-Current user question:
-human: {question}
-
-Please analyze the conversation history to understand any references (like "this service area", "that branch", etc.) in the current question, and generate the appropriate SQL query."""
-    else:
-        # If no history messages, use the original question directly
-        enriched_question = question
-
-    # 🎯 添加稳定的Vanna输入日志
-    logger.info("📝 [Vanna Input] Complete question being sent to Vanna:")
-    logger.info("--- BEGIN VANNA INPUT ---")
-    logger.info(enriched_question)
-    logger.info("--- END VANNA INPUT ---")
-
-    try:
-        from common.vanna_instance import get_vanna_instance
-        vn = get_vanna_instance()
-        sql = vn.generate_sql(enriched_question)
-
-        if not sql or sql.strip() == "":
-            if hasattr(vn, 'last_llm_explanation') and vn.last_llm_explanation:
-                error_info = vn.last_llm_explanation
-                logger.warning(f"   Vanna returned an explanation instead of SQL: {error_info}")
-                return f"Database query failed. Reason: {error_info}"
-            else:
-                logger.warning("   Vanna failed to generate SQL and provided no explanation.")
-                return "Could not generate SQL: The question may not be suitable for a database query."
-
-        sql_upper = sql.upper().strip()
-        if not any(keyword in sql_upper for keyword in ['SELECT', 'WITH']):
-            logger.warning(f"   Vanna returned a message that does not appear to be a valid SQL query: {sql}")
-            return f"Database query failed. Reason: {sql}"
-
-        logger.info(f"   ✅ SQL Generated Successfully:")
-        logger.info(f"   {sql}")
-        return sql
-
-    except Exception as e:
-        logger.error(f"   An exception occurred during SQL generation: {e}", exc_info=True)
-        return f"SQL generation failed: {str(e)}"
-
-def _check_basic_syntax(sql: str) -> bool:
-    """规则1: 检查SQL是否包含基础查询关键词"""
-    if not sql or sql.strip() == "":
-        return False
-    
-    sql_upper = sql.upper().strip()
-    return any(keyword in sql_upper for keyword in ['SELECT', 'WITH'])
-
-
-def _check_security(sql: str) -> tuple[bool, str]:
-    """规则2: 检查SQL是否包含危险操作
-    
-    Returns:
-        tuple: (是否安全, 错误信息)
-    """
-    sql_upper = sql.upper().strip()
-    dangerous_patterns = [r'\bDROP\b', r'\bDELETE\b', r'\bTRUNCATE\b', r'\bALTER\b', r'\bCREATE\b', r'\bUPDATE\b']
-    
-    for pattern in dangerous_patterns:
-        if re.search(pattern, sql_upper):
-            keyword = pattern.replace(r'\b', '').replace('\\', '')
-            return False, f"包含危险操作 {keyword}"
-    
-    return True, ""
-
-
-def _has_limit_clause(sql: str) -> bool:
-    """检测SQL是否包含LIMIT子句"""
-    # 使用正则表达式检测LIMIT关键词,支持多种格式
-    # LIMIT n 或 LIMIT offset, count 格式
-    limit_pattern = r'\bLIMIT\s+\d+(?:\s*,\s*\d+)?\s*(?:;|\s*$)'
-    return bool(re.search(limit_pattern, sql, re.IGNORECASE))
-
-
-def _validate_with_limit_zero(sql: str) -> str:
-    """规则3: 使用LIMIT 0验证SQL(适用于无LIMIT子句的SQL)"""
-    try:
-        from common.vanna_instance import get_vanna_instance
-        vn = get_vanna_instance()
-        
-        # 添加 LIMIT 0 避免返回大量数据,只验证SQL结构
-        test_sql = sql.rstrip(';') + " LIMIT 0"
-        logger.info(f"   执行LIMIT 0验证:")
-        logger.info(f"   {test_sql}")
-        vn.run_sql(test_sql)
-        
-        logger.info("   ✅ SQL验证通过:语法正确且字段/表存在")
-        return "SQL验证通过:语法正确且字段存在"
-        
-    except Exception as e:
-        return _format_validation_error(str(e))
-
-
-def _validate_with_prepare(sql: str) -> str:
-    """规则4: 使用PREPARE/DEALLOCATE验证SQL(适用于包含LIMIT子句的SQL)"""
-    import time
-    
-    try:
-        from common.vanna_instance import get_vanna_instance
-        vn = get_vanna_instance()
-        
-        # 生成唯一的语句名,避免并发冲突
-        stmt_name = f"validation_stmt_{int(time.time() * 1000)}"
-        prepare_executed = False
-        
-        try:
-            # 执行PREPARE验证
-            prepare_sql = f"PREPARE {stmt_name} AS {sql.rstrip(';')}"
-            logger.info(f"   执行PREPARE验证:")
-            logger.info(f"   {prepare_sql}")
-            
-            vn.run_sql(prepare_sql)
-            prepare_executed = True
-            
-            # 如果执行到这里没有异常,说明PREPARE成功
-            logger.info("   ✅ PREPARE执行成功,SQL验证通过")
-            return "SQL验证通过:语法正确且字段存在"
-            
-        except Exception as e:
-            error_msg = str(e).lower()
-            
-            # PostgreSQL中PREPARE不返回结果集是正常行为
-            if "no results to fetch" in error_msg:
-                prepare_executed = True  # 标记为成功执行
-                logger.info("   ✅ PREPARE执行成功(无结果集),SQL验证通过")
-                return "SQL验证通过:语法正确且字段存在"
-            else:
-                # 真正的错误(语法错误、字段不存在等)
-                raise e
-                
-        finally:
-            # 只有在PREPARE成功执行时才尝试清理资源
-            if prepare_executed:
-                try:
-                    deallocate_sql = f"DEALLOCATE {stmt_name}"
-                    logger.info(f"   清理PREPARE资源: {deallocate_sql}")
-                    vn.run_sql(deallocate_sql)
-                except Exception as cleanup_error:
-                    # 清理失败不影响验证结果,只记录警告
-                    logger.warning(f"   清理PREPARE资源失败: {cleanup_error}")
-                    
-    except Exception as e:
-        return _format_validation_error(str(e))
-
-
-def _format_validation_error(error_msg: str) -> str:
-    """格式化验证错误信息"""
-    logger.warning(f"   SQL验证失败:执行测试时出错 - {error_msg}")
-    
-    # 提供更详细的错误信息供LLM理解和处理
-    if "column" in error_msg.lower() and ("does not exist" in error_msg.lower() or "不存在" in error_msg):
-        return f"SQL验证失败:字段不存在。详细错误:{error_msg}"
-    elif "table" in error_msg.lower() and ("does not exist" in error_msg.lower() or "不存在" in error_msg):
-        return f"SQL验证失败:表不存在。详细错误:{error_msg}"
-    elif "syntax error" in error_msg.lower() or "语法错误" in error_msg:
-        return f"SQL验证失败:语法错误。详细错误:{error_msg}"
-    else:
-        return f"SQL验证失败:执行失败。详细错误:{error_msg}"
-
-
-@tool
-def valid_sql(sql: str) -> str:
-    """
-    验证SQL语句的正确性和安全性,使用四规则递进验证:
-    1. 基础语法检查(SELECT/WITH关键词)
-    2. 安全检查(无危险操作)
-    3. 语义验证:无LIMIT时使用LIMIT 0验证
-    4. 语义验证:有LIMIT时使用PREPARE/DEALLOCATE验证
-
-    Args:
-        sql: 待验证的SQL语句。
-
-    Returns:
-        验证结果。
-    """
-    logger.info(f"🔧 [Tool] valid_sql - 待验证SQL:")
-    logger.info(f"   {sql}")
-
-    # 规则1: 基础语法检查
-    if not _check_basic_syntax(sql):
-        logger.warning("   SQL验证失败:SQL语句为空或不是有效的查询语句")
-        return "SQL验证失败:SQL语句为空或不是有效的查询语句"
-
-    # 规则2: 安全检查
-    is_safe, security_error = _check_security(sql)
-    if not is_safe:
-        logger.error(f"   SQL验证失败:{security_error}")
-        return f"SQL验证失败:{security_error}"
-
-    # 规则3/4: 语义验证(二选一)
-    if _has_limit_clause(sql):
-        logger.info("   检测到LIMIT子句,使用PREPARE验证")
-        return _validate_with_prepare(sql)
-    else:
-        logger.info("   未检测到LIMIT子句,使用LIMIT 0验证")
-        return _validate_with_limit_zero(sql)
-
-@tool
-def run_sql(sql: str) -> str:
-    """
-    执行SQL查询并以JSON字符串格式返回结果。
-
-    Args:
-        sql: 待执行的SQL语句。
-
-    Returns:
-        JSON字符串格式的查询结果,或包含错误的JSON字符串。
-    """
-    logger.info(f"🔧 [Tool] run_sql - 待执行SQL:")
-    logger.info(f"   {sql}")
-
-    try:
-        from common.vanna_instance import get_vanna_instance
-        vn = get_vanna_instance()
-        df = vn.run_sql(sql)
-
-        print("-------------run_sql() df -------------------")
-        print(df)
-        print("--------------------------------")
-
-        if df is None:
-            logger.warning("   SQL执行成功,但查询结果为空。")
-            result = {"status": "success", "data": [], "message": "查询无结果"}
-            return json.dumps(result, ensure_ascii=False)
-
-        logger.info(f"   ✅ SQL执行成功,返回 {len(df)} 条记录。")
-        # 将DataFrame转换为JSON,并妥善处理datetime等特殊类型
-        return df.to_json(orient='records', date_format='iso')
-
-    except Exception as e:
-        logger.error(f"   SQL执行过程中发生异常: {e}", exc_info=True)
-        error_result = {"status": "error", "error_message": str(e)}
-        return json.dumps(error_result, ensure_ascii=False)
-    
-
-
-# 将所有工具函数收集到一个列表中,方便Agent导入和使用
-sql_tools = [generate_sql, valid_sql, run_sql] 
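
For reference, a standalone sketch of the pre-flight checks the deleted `valid_sql` tool applies before touching the database: rule 1 (must be a SELECT/WITH query), rule 2 (no dangerous keywords), and the LIMIT detection that chooses between the `LIMIT 0` and `PREPARE/DEALLOCATE` paths. The database-backed validation steps themselves are only represented here by the returned path name.

```python
# Pre-flight checks from valid_sql, without any database access.
import re

DANGEROUS = [r'\bDROP\b', r'\bDELETE\b', r'\bTRUNCATE\b', r'\bALTER\b', r'\bCREATE\b', r'\bUPDATE\b']

def basic_syntax_ok(sql: str) -> bool:
    """Rule 1: the statement must be a SELECT or WITH query."""
    sql_upper = sql.upper().strip()
    return bool(sql_upper) and any(k in sql_upper for k in ("SELECT", "WITH"))

def security_ok(sql: str) -> tuple:
    """Rule 2: reject statements containing DDL/DML keywords."""
    sql_upper = sql.upper()
    for pattern in DANGEROUS:
        if re.search(pattern, sql_upper):
            return False, pattern.replace(r'\b', '')
    return True, ""

def has_trailing_limit(sql: str) -> bool:
    """Detect a LIMIT clause at the end of the statement."""
    return bool(re.search(r'\bLIMIT\s+\d+(?:\s*,\s*\d+)?\s*(?:;|\s*$)', sql, re.IGNORECASE))

def choose_validation_path(sql: str) -> str:
    """Return which of the four rules decides the statement's fate."""
    if not basic_syntax_ok(sql):
        return "reject: not a SELECT/WITH query"
    ok, keyword = security_ok(sql)
    if not ok:
        return f"reject: dangerous keyword {keyword}"
    return "PREPARE/DEALLOCATE check" if has_trailing_limit(sql) else "append LIMIT 0 and execute"

if __name__ == "__main__":
    print(choose_validation_path("SELECT * FROM bss_company LIMIT 10"))
    print(choose_validation_path("SELECT * FROM bss_company"))
    print(choose_validation_path("DROP TABLE bss_company"))
```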

+ 0 - 21
test/custom_react_agent/state.py

@@ -1,21 +0,0 @@
-"""
-定义 StateGraph 的状态
-"""
-from typing import TypedDict, Annotated, Optional, List, Dict, Any
-from langchain_core.messages import BaseMessage
-from langgraph.graph.message import add_messages
-
-class AgentState(TypedDict):
-    """
-    StateGraph 中流转的状态对象定义。
-
-    Attributes:
-        messages: 对话消息列表,使用 add_messages 聚合。
-        user_id: 当前用户ID。
-        thread_id: 当前会话的线程ID。
-        suggested_next_step: 用于引导LLM下一步行动的建议指令。
-    """
-    messages: Annotated[List[BaseMessage], add_messages]
-    user_id: str
-    thread_id: str
-    suggested_next_step: Optional[str] 
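
For reference, a minimal sketch of how an `AgentState` like the one above is typically wired into a LangGraph `StateGraph`. It assumes `langgraph` and `langchain-core` are installed; the `echo` node is invented purely to show the `add_messages` reducer appending to `messages`.

```python
# Minimal StateGraph wiring around the AgentState definition above (assumes langgraph, langchain-core).
from typing import Annotated, List, Optional, TypedDict
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage
from langgraph.graph import StateGraph, START, END
from langgraph.graph.message import add_messages

class AgentState(TypedDict):
    messages: Annotated[List[BaseMessage], add_messages]
    user_id: str
    thread_id: str
    suggested_next_step: Optional[str]

def echo_node(state: AgentState) -> dict:
    # Returning a partial state; add_messages appends the new message to the list.
    last = state["messages"][-1].content
    return {"messages": [AIMessage(content=f"echo: {last}")]}

builder = StateGraph(AgentState)
builder.add_node("echo", echo_node)
builder.add_edge(START, "echo")
builder.add_edge("echo", END)
graph = builder.compile()

if __name__ == "__main__":
    result = graph.invoke({
        "messages": [HumanMessage(content="hello")],
        "user_id": "wang",
        "thread_id": "wang:20250101120000001",
        "suggested_next_step": None,
    })
    for msg in result["messages"]:
        print(type(msg).__name__, msg.content)
```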

+ 0 - 152
test/custom_react_agent/test/test_api.py

@@ -1,152 +0,0 @@
-#!/usr/bin/env python3
-"""
-Custom React Agent API 测试脚本
-
-测试基本的API功能,包括:
-1. 健康检查
-2. 普通问答
-3. SQL查询
-4. 错误处理
-
-运行前请确保API服务已启动:
-python api.py
-"""
-import asyncio
-import aiohttp
-import json
-import sys
-from typing import Dict, Any
-
-API_BASE_URL = "http://localhost:8000"
-
-class APITester:
-    """API测试类"""
-    
-    def __init__(self, base_url: str = API_BASE_URL):
-        self.base_url = base_url
-        self.session = None
-    
-    async def __aenter__(self):
-        self.session = aiohttp.ClientSession()
-        return self
-    
-    async def __aexit__(self, exc_type, exc_val, exc_tb):
-        if self.session:
-            await self.session.close()
-    
-    async def test_health_check(self) -> bool:
-        """测试健康检查"""
-        print("🔍 测试健康检查...")
-        try:
-            async with self.session.get(f"{self.base_url}/health") as response:
-                if response.status == 200:
-                    data = await response.json()
-                    print(f"   ✅ 健康检查通过: {data}")
-                    return True
-                else:
-                    print(f"   ❌ 健康检查失败: HTTP {response.status}")
-                    return False
-        except Exception as e:
-            print(f"   ❌ 健康检查异常: {e}")
-            return False
-    
-    async def test_chat_api(self, question: str, user_id: str = "test_user", 
-                           thread_id: str = None) -> Dict[str, Any]:
-        """测试聊天API"""
-        print(f"\n💬 测试问题: {question}")
-        
-        payload = {
-            "question": question,
-            "user_id": user_id
-        }
-        if thread_id:
-            payload["thread_id"] = thread_id
-        
-        try:
-            async with self.session.post(
-                f"{self.base_url}/api/chat",
-                json=payload,
-                headers={"Content-Type": "application/json"}
-            ) as response:
-                
-                response_data = await response.json()
-                
-                print(f"   📊 HTTP状态: {response.status}")
-                print(f"   📋 响应代码: {response_data.get('code')}")
-                print(f"   🎯 成功状态: {response_data.get('success')}")
-                
-                if response_data.get('success'):
-                    data = response_data.get('data', {})
-                    print(f"   💡 回答: {data.get('response', '')[:100]}...")
-                    
-                    if 'sql' in data:
-                        print(f"   🗄️  SQL: {data['sql'][:100]}...")
-                    
-                    if 'records' in data:
-                        records = data['records']
-                        print(f"   📈 数据行数: {records.get('total_row_count', 0)}")
-                    
-                    meta = data.get('react_agent_meta', {})
-                    print(f"   🔧 使用工具: {meta.get('tools_used', [])}")
-                    print(f"   🆔 会话ID: {meta.get('thread_id', '')}")
-                    
-                    return response_data
-                else:
-                    error = response_data.get('error', '未知错误')
-                    print(f"   ❌ 请求失败: {error}")
-                    return response_data
-                    
-        except Exception as e:
-            print(f"   ❌ 请求异常: {e}")
-            return {"success": False, "error": str(e)}
-    
-    async def run_test_suite(self):
-        """运行完整的测试套件"""
-        print("🚀 开始API测试套件")
-        print("=" * 50)
-        
-        # 1. 健康检查
-        health_ok = await self.test_health_check()
-        if not health_ok:
-            print("❌ 健康检查失败,停止测试")
-            return
-        
-        # 2. 普通问答测试
-        await self.test_chat_api("你好,你是谁?")
-        
-        # 3. SQL查询测试(假设有相关数据)
-        result1 = await self.test_chat_api("请查询服务区的收入情况")
-        
-        # 4. 上下文对话测试
-        thread_id = None
-        if result1.get('success'):
-            thread_id = result1.get('data', {}).get('react_agent_meta', {}).get('thread_id')
-        
-        if thread_id:
-            await self.test_chat_api("请详细解释一下", thread_id=thread_id)
-        
-        # 5. 错误处理测试
-        await self.test_chat_api("")  # 空问题
-        await self.test_chat_api("a" * 3000)  # 超长问题
-        
-        print("\n" + "=" * 50)
-        print("✅ 测试套件完成")
-
-async def main():
-    """主函数"""
-    print("Custom React Agent API 测试工具")
-    print("请确保API服务已在 http://localhost:8000 启动")
-    print()
-    
-    # 检查是否要运行特定测试
-    if len(sys.argv) > 1:
-        question = " ".join(sys.argv[1:])
-        async with APITester() as tester:
-            await tester.test_chat_api(question)
-    else:
-        # 运行完整测试套件
-        async with APITester() as tester:
-            await tester.run_test_suite()
-
-if __name__ == "__main__":
-    asyncio.run(main()) 

+ 0 - 198
test/custom_react_agent/test/test_api_design.py

@@ -1,198 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试修改后的 API 是否符合设计文档要求
-"""
-import json
-import asyncio
-import aiohttp
-from typing import Dict, Any
-
-async def test_api_design_compliance():
-    """测试 API 设计文档合规性"""
-    
-    base_url = "http://localhost:8000"
-    
-    # 测试用例
-    test_cases = [
-        {
-            "name": "基本聊天测试",
-            "payload": {
-                "question": "你好,我想了解一下今天的天气",
-                "user_id": "wang"
-            },
-            "expected_fields": ["response", "react_agent_meta", "timestamp"]
-        },
-        {
-            "name": "SQL查询测试",
-            "payload": {
-                "question": "请查询服务区的收入数据",
-                "user_id": "test_user"
-            },
-            "expected_fields": ["response", "sql", "records", "react_agent_meta", "timestamp"]
-        },
-        {
-            "name": "继续对话测试",
-            "payload": {
-                "question": "请详细说明一下",
-                "user_id": "wang",
-                "thread_id": None  # 将在第一个测试后设置
-            },
-            "expected_fields": ["response", "react_agent_meta", "timestamp"]
-        }
-    ]
-    
-    session = aiohttp.ClientSession()
-    
-    try:
-        print("🧪 开始测试 API 设计文档合规性...")
-        print("=" * 60)
-        
-        thread_id = None
-        
-        for i, test_case in enumerate(test_cases, 1):
-            print(f"\n📋 测试 {i}: {test_case['name']}")
-            print("-" * 40)
-            
-            # 如果是继续对话测试,使用之前的 thread_id
-            if test_case["name"] == "继续对话测试" and thread_id:
-                test_case["payload"]["thread_id"] = thread_id
-            
-            # 发送请求
-            async with session.post(
-                f"{base_url}/api/chat",
-                json=test_case["payload"],
-                headers={"Content-Type": "application/json"}
-            ) as response:
-                
-                print(f"📊 HTTP状态码: {response.status}")
-                
-                if response.status != 200:
-                    print(f"❌ 请求失败,状态码: {response.status}")
-                    continue
-                
-                # 解析响应
-                result = await response.json()
-                
-                # 验证顶级结构
-                required_top_fields = ["code", "message", "success", "data"]
-                for field in required_top_fields:
-                    if field not in result:
-                        print(f"❌ 缺少顶级字段: {field}")
-                    else:
-                        print(f"✅ 顶级字段 {field}: {result[field]}")
-                
-                # 验证 data 字段结构
-                if "data" in result:
-                    data = result["data"]
-                    print(f"\n📦 data 字段包含: {list(data.keys())}")
-                    
-                    # 验证必需字段
-                    required_fields = ["response", "react_agent_meta", "timestamp"]
-                    for field in required_fields:
-                        if field not in data:
-                            print(f"❌ data 中缺少必需字段: {field}")
-                        else:
-                            print(f"✅ 必需字段 {field}: 存在")
-                    
-                    # 验证可选字段
-                    optional_fields = ["sql", "records"]
-                    for field in optional_fields:
-                        if field in data:
-                            print(f"✅ 可选字段 {field}: 存在")
-                        else:
-                            print(f"ℹ️  可选字段 {field}: 不存在(正常)")
-                    
-                    # 验证 react_agent_meta 结构
-                    if "react_agent_meta" in data:
-                        meta = data["react_agent_meta"]
-                        print(f"\n🔧 react_agent_meta 字段: {list(meta.keys())}")
-                        
-                        # 保存 thread_id 用于后续测试
-                        if "thread_id" in meta:
-                            thread_id = meta["thread_id"]
-                            print(f"🆔 Thread ID: {thread_id}")
-                    
-                    # 验证 records 结构(如果存在)
-                    if "records" in data:
-                        records = data["records"]
-                        print(f"\n📊 records 字段: {list(records.keys())}")
-                        required_record_fields = ["columns", "rows", "total_row_count", "is_limited"]
-                        for field in required_record_fields:
-                            if field not in records:
-                                print(f"❌ records 中缺少字段: {field}")
-                            else:
-                                print(f"✅ records 字段 {field}: 存在")
-                
-                print(f"\n✅ 测试 {i} 完成")
-        
-        print("\n" + "=" * 60)
-        print("🎉 所有测试完成!")
-        
-    except Exception as e:
-        print(f"❌ 测试过程中发生错误: {e}")
-        import traceback
-        traceback.print_exc()
-    
-    finally:
-        await session.close()
-
-async def test_error_handling():
-    """测试错误处理"""
-    
-    base_url = "http://localhost:8000"
-    session = aiohttp.ClientSession()
-    
-    try:
-        print("\n🧪 测试错误处理...")
-        print("=" * 60)
-        
-        # 测试参数错误
-        test_cases = [
-            {
-                "name": "缺少问题",
-                "payload": {"user_id": "test"},
-                "expected_code": 400
-            },
-            {
-                "name": "空问题",
-                "payload": {"question": "", "user_id": "test"},
-                "expected_code": 400
-            },
-            {
-                "name": "问题过长",
-                "payload": {"question": "x" * 2001, "user_id": "test"},
-                "expected_code": 400
-            }
-        ]
-        
-        for test_case in test_cases:
-            print(f"\n📋 错误测试: {test_case['name']}")
-            
-            async with session.post(
-                f"{base_url}/api/chat",
-                json=test_case["payload"],
-                headers={"Content-Type": "application/json"}
-            ) as response:
-                
-                result = await response.json()
-                
-                print(f"📊 HTTP状态码: {response.status}")
-                print(f"📋 响应代码: {result.get('code')}")
-                print(f"🎯 成功状态: {result.get('success')}")
-                print(f"❌ 错误信息: {result.get('error')}")
-                
-                if response.status == test_case["expected_code"]:
-                    print("✅ 错误处理正确")
-                else:
-                    print(f"❌ 期望状态码 {test_case['expected_code']}, 实际 {response.status}")
-    
-    finally:
-        await session.close()
-
-if __name__ == "__main__":
-    print("🚀 启动 API 设计文档合规性测试")
-    print("请确保 API 服务已启动 (python api.py)")
-    print("=" * 60)
-    
-    asyncio.run(test_api_design_compliance())
-    asyncio.run(test_error_handling()) 

+ 0 - 116
test/custom_react_agent/test/test_api_modifications.py

@@ -1,116 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试 API 修改是否正确实现
-测试新增的 conversation_id 和 user_id 字段
-"""
-
-import requests
-import json
-
-def test_api_modifications():
-    """测试API修改"""
-    
-    api_url = "http://localhost:8000/api/chat"
-    
-    # 测试用例1: 使用 thread_id (原有方式)
-    test_case_1 = {
-        "question": "测试使用thread_id参数",
-        "user_id": "test_user_1",
-        "thread_id": "test_user_1:20250714120000001"
-    }
-    
-    # 测试用例2: 使用 conversation_id (新增方式)
-    test_case_2 = {
-        "question": "测试使用conversation_id参数", 
-        "user_id": "test_user_2",
-        "conversation_id": "test_user_2:20250714120000002"
-    }
-    
-    # 测试用例3: 同时提供两个参数 (应该优先使用thread_id)
-    test_case_3 = {
-        "question": "测试同时提供两个参数",
-        "user_id": "test_user_3", 
-        "thread_id": "test_user_3:20250714120000003",
-        "conversation_id": "test_user_3:20250714120000004"  # 这个应该被忽略
-    }
-    
-    # 测试用例4: 都不提供 (应该自动生成)
-    test_case_4 = {
-        "question": "测试自动生成会话ID",
-        "user_id": "test_user_4"
-    }
-    
-    test_cases = [
-        ("使用thread_id", test_case_1),
-        ("使用conversation_id", test_case_2), 
-        ("同时提供两个参数", test_case_3),
-        ("自动生成", test_case_4)
-    ]
-    
-    print("🧪 开始测试 API 修改...")
-    print("=" * 60)
-    
-    for test_name, test_data in test_cases:
-        print(f"\n📋 测试用例: {test_name}")
-        print(f"📨 请求数据: {json.dumps(test_data, ensure_ascii=False, indent=2)}")
-        
-        try:
-            response = requests.post(
-                api_url,
-                json=test_data,
-                headers={"Content-Type": "application/json"},
-                timeout=30
-            )
-            
-            print(f"📊 响应状态码: {response.status_code}")
-            
-            if response.status_code == 200:
-                result = response.json()
-                data = result.get("data", {})
-                
-                # 检查必需的新字段
-                conversation_id = data.get("conversation_id")
-                user_id = data.get("user_id") 
-                thread_id = data.get("react_agent_meta", {}).get("thread_id")
-                
-                print(f"✅ 响应成功:")
-                print(f"   - conversation_id: {conversation_id}")
-                print(f"   - user_id: {user_id}")
-                print(f"   - thread_id: {thread_id}")
-                print(f"   - conversation_id == thread_id: {conversation_id == thread_id}")
-                print(f"   - user_id 正确: {user_id == test_data['user_id']}")
-                
-                # 验证逻辑正确性
-                if test_name == "同时提供两个参数":
-                    expected_thread_id = test_data["thread_id"]
-                    if thread_id == expected_thread_id:
-                        print(f"   ✅ 优先使用 thread_id 逻辑正确")
-                    else:
-                        print(f"   ❌ 优先使用 thread_id 逻辑错误,期望: {expected_thread_id}, 实际: {thread_id}")
-                
-                elif test_name == "使用conversation_id":
-                    expected_thread_id = test_data["conversation_id"]
-                    if thread_id == expected_thread_id:
-                        print(f"   ✅ conversation_id 转换为 thread_id 逻辑正确")
-                    else:
-                        print(f"   ❌ conversation_id 转换逻辑错误,期望: {expected_thread_id}, 实际: {thread_id}")
-                
-            else:
-                print(f"❌ 请求失败: {response.text}")
-                
-        except requests.exceptions.RequestException as e:
-            print(f"❌ 网络错误: {e}")
-        except Exception as e:
-            print(f"❌ 其他错误: {e}")
-    
-    print("\n" + "=" * 60)
-    print("🎯 测试完成!")
-    print("\n💡 预期结果:")
-    print("1. 所有响应都应该包含 conversation_id 和 user_id 字段")
-    print("2. conversation_id 应该等于 react_agent_meta.thread_id")
-    print("3. user_id 应该等于请求中的 user_id")
-    print("4. 当同时提供 thread_id 和 conversation_id 时,应该优先使用 thread_id")
-    print("5. 当只提供 conversation_id 时,应该将其作为 thread_id 使用")
-
-if __name__ == "__main__":
-    test_api_modifications()

+ 0 - 97
test/custom_react_agent/test/test_asgi_setup.py

@@ -1,97 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试ASGI设置是否正确
-"""
-import sys
-import importlib.util
-
-def test_asgi_dependencies():
-    """测试ASGI依赖是否可用"""
-    print("🧪 测试ASGI依赖...")
-    
-    # 测试uvicorn
-    try:
-        import uvicorn
-        print(f"   ✅ uvicorn: {uvicorn.__version__}")
-    except ImportError:
-        print("   ❌ uvicorn: 未安装")
-        print("      安装命令: pip install uvicorn")
-        return False
-    
-    # 测试asgiref
-    try:
-        import asgiref
-        print(f"   ✅ asgiref: {asgiref.__version__}")
-    except ImportError:
-        print("   ❌ asgiref: 未安装")
-        print("      安装命令: pip install asgiref")
-        return False
-    
-    # 测试WsgiToAsgi
-    try:
-        from asgiref.wsgi import WsgiToAsgi
-        print("   ✅ WsgiToAsgi: 可用")
-    except ImportError:
-        print("   ❌ WsgiToAsgi: 不可用")
-        return False
-    
-    return True
-
-def test_api_import():
-    """测试API模块是否可以正常导入"""
-    print("\n🧪 测试API模块导入...")
-    
-    try:
-        from api import app
-        print("   ✅ Flask应用导入成功")
-        return True
-    except ImportError as e:
-        print(f"   ❌ Flask应用导入失败: {e}")
-        return False
-
-def test_asgi_conversion():
-    """测试ASGI转换是否工作"""
-    print("\n🧪 测试ASGI转换...")
-    
-    try:
-        from asgiref.wsgi import WsgiToAsgi
-        from api import app
-        
-        asgi_app = WsgiToAsgi(app)
-        print("   ✅ WSGI到ASGI转换成功")
-        return True
-    except Exception as e:
-        print(f"   ❌ ASGI转换失败: {e}")
-        return False
-
-def main():
-    """主测试函数"""
-    print("=" * 50)
-    print("🚀 ASGI设置测试")
-    print("=" * 50)
-    
-    success = True
-    
-    # 测试依赖
-    if not test_asgi_dependencies():
-        success = False
-    
-    # 测试API导入
-    if not test_api_import():
-        success = False
-    
-    # 测试ASGI转换
-    if success and not test_asgi_conversion():
-        success = False
-    
-    print("\n" + "=" * 50)
-    if success:
-        print("✅ 所有测试通过!可以使用ASGI模式启动")
-        print("💡 启动命令: python api.py")
-    else:
-        print("❌ 测试失败,请检查依赖安装")
-        print("💡 安装命令: pip install uvicorn asgiref")
-    print("=" * 50)
-
-if __name__ == "__main__":
-    main()

+ 0 - 234
test/custom_react_agent/test/test_conversation_api.py

@@ -1,234 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试新增的对话历史查询API
-"""
-
-import requests
-import json
-import time
-import sys
-from typing import Dict, Any
-
-API_BASE = "http://localhost:8000"
-
-def test_health_check():
-    """测试健康检查"""
-    print("🔍 测试健康检查...")
-    try:
-        response = requests.get(f"{API_BASE}/health")
-        result = response.json()
-        
-        if response.status_code == 200 and result.get("status") == "healthy":
-            print("✅ 健康检查通过")
-            return True
-        else:
-            print(f"❌ 健康检查失败: {result}")
-            return False
-    except Exception as e:
-        print(f"❌ 健康检查异常: {e}")
-        return False
-
-def create_test_conversations(user_id: str) -> list:
-    """创建测试对话"""
-    print(f"\n💬 为用户 {user_id} 创建测试对话...")
-    
-    test_questions = [
-        "请问哪个高速服务区的档口数量最多?",
-        "南城服务区有多少个餐饮档口?",
-        "请查询收入最高的服务区",
-        "你好,请介绍一下系统功能"
-    ]
-    
-    thread_ids = []
-    
-    for i, question in enumerate(test_questions):
-        print(f"  📝 创建对话 {i+1}: {question[:30]}...")
-        
-        try:
-            response = requests.post(
-                f"{API_BASE}/api/chat",
-                json={
-                    "question": question,
-                    "user_id": user_id
-                }
-            )
-            
-            if response.status_code == 200:
-                result = response.json()
-                if result.get("success"):
-                    thread_id = result.get("thread_id")
-                    thread_ids.append(thread_id)
-                    print(f"     ✅ 创建成功: {thread_id}")
-                else:
-                    print(f"     ❌ 创建失败: {result.get('error')}")
-            else:
-                print(f"     ❌ HTTP错误: {response.status_code}")
-                
-            # 稍微延迟,确保时间戳不同
-            time.sleep(1)
-            
-        except Exception as e:
-            print(f"     ❌ 异常: {e}")
-    
-    print(f"🎯 共创建了 {len(thread_ids)} 个测试对话")
-    return thread_ids
-
-def test_get_user_conversations(user_id: str, limit: int = 5):
-    """测试获取用户对话列表"""
-    print(f"\n📋 测试获取用户 {user_id} 的对话列表 (limit={limit})...")
-    
-    try:
-        response = requests.get(f"{API_BASE}/api/v0/react/users/{user_id}/conversations?limit={limit}")
-        
-        print(f"   状态码: {response.status_code}")
-        
-        if response.status_code == 200:
-            result = response.json()
-            
-            if result.get("success"):
-                data = result.get("data", {})
-                conversations = data.get("conversations", [])
-                
-                print(f"✅ 成功获取 {len(conversations)} 个对话")
-                print(f"   用户ID: {data.get('user_id')}")
-                print(f"   总数量: {data.get('total_count')}")
-                print(f"   限制数量: {data.get('limit')}")
-                
-                # 显示对话列表
-                for i, conv in enumerate(conversations):
-                    print(f"\n   📝 对话 {i+1}:")
-                    print(f"      Thread ID: {conv.get('thread_id')}")
-                    print(f"      时间戳: {conv.get('formatted_time')}")
-                    print(f"      消息数: {conv.get('message_count')}")
-                    print(f"      预览: {conv.get('conversation_preview')}")
-                    print(f"      最后消息: {conv.get('last_message', '')[:50]}...")
-                
-                return conversations
-            else:
-                print(f"❌ API返回错误: {result.get('error')}")
-                return []
-        else:
-            print(f"❌ HTTP错误: {response.status_code}")
-            try:
-                error_detail = response.json()
-                print(f"   错误详情: {error_detail}")
-            except:
-                print(f"   响应内容: {response.text}")
-            return []
-            
-    except Exception as e:
-        print(f"❌ 请求异常: {e}")
-        return []
-
-def test_get_conversation_detail(user_id: str, thread_id: str):
-    """测试获取对话详情"""
-    print(f"\n📖 测试获取对话详情: {thread_id}...")
-    
-    try:
-        response = requests.get(f"{API_BASE}/api/v0/react/users/{user_id}/conversations/{thread_id}")
-        
-        print(f"   状态码: {response.status_code}")
-        
-        if response.status_code == 200:
-            result = response.json()
-            
-            if result.get("success"):
-                data = result.get("data", {})
-                messages = data.get("messages", [])
-                
-                print(f"✅ 成功获取对话详情")
-                print(f"   用户ID: {data.get('user_id')}")
-                print(f"   Thread ID: {data.get('thread_id')}")
-                print(f"   消息数量: {data.get('message_count')}")
-                
-                # 显示消息历史
-                print(f"\n   📜 消息历史:")
-                for i, msg in enumerate(messages):
-                    msg_type = msg.get('type', 'unknown')
-                    content = msg.get('content', '')
-                    
-                    # 限制显示长度
-                    display_content = content[:100] + "..." if len(content) > 100 else content
-                    
-                    print(f"      [{i+1}] {msg_type.upper()}: {display_content}")
-                    
-                    # 如果有工具调用,显示相关信息
-                    if msg.get('tool_calls'):
-                        print(f"          🔧 包含工具调用")
-                
-                return data
-            else:
-                print(f"❌ API返回错误: {result.get('error')}")
-                return None
-        else:
-            print(f"❌ HTTP错误: {response.status_code}")
-            try:
-                error_detail = response.json()
-                print(f"   错误详情: {error_detail}")
-            except:
-                print(f"   响应内容: {response.text}")
-            return None
-            
-    except Exception as e:
-        print(f"❌ 请求异常: {e}")
-        return None
-
-def test_invalid_cases(user_id: str):
-    """测试无效情况的处理"""
-    print(f"\n⚠️  测试错误处理...")
-    
-    # 测试1: 不存在的用户
-    print("   测试不存在的用户...")
-    response = requests.get(f"{API_BASE}/api/v0/react/users/nonexistent_user/conversations")
-    print(f"   状态码: {response.status_code} (应该是200,返回空列表)")
-    
-    # 测试2: 不匹配的thread_id
-    print("   测试不匹配的thread_id...")
-    response = requests.get(f"{API_BASE}/api/v0/react/users/{user_id}/conversations/wronguser:20250115103000001")
-    print(f"   状态码: {response.status_code} (应该是400)")
-    
-    # 测试3: 超出限制的limit参数
-    print("   测试超出限制的limit参数...")
-    response = requests.get(f"{API_BASE}/api/v0/react/users/{user_id}/conversations?limit=100")
-    if response.status_code == 200:
-        result = response.json()
-        actual_limit = result.get("data", {}).get("limit", 0)
-        print(f"   实际limit: {actual_limit} (应该被限制为50)")
-
-def main():
-    """主测试流程"""
-    print("🚀 开始测试对话历史查询API")
-    print("=" * 60)
-    
-    # 1. 健康检查
-    if not test_health_check():
-        print("❌ 服务不可用,退出测试")
-        sys.exit(1)
-    
-    # 2. 设置测试用户
-    user_id = "test_user"
-    print(f"\n🎯 使用测试用户: {user_id}")
-    
-    # 3. 创建测试对话
-    thread_ids = create_test_conversations(user_id)
-    
-    if not thread_ids:
-        print("❌ 未能创建测试对话,跳过后续测试")
-        return
-    
-    # 4. 测试获取对话列表
-    conversations = test_get_user_conversations(user_id, limit=3)
-    
-    # 5. 测试获取对话详情
-    if conversations and len(conversations) > 0:
-        test_thread_id = conversations[0].get("thread_id")
-        test_get_conversation_detail(user_id, test_thread_id)
-    
-    # 6. 测试边界情况
-    test_invalid_cases(user_id)
-    
-    print("\n🎉 测试完成!")
-    print("=" * 60)
-
-if __name__ == "__main__":
-    main() 

+ 0 - 53
test/custom_react_agent/test/test_fix.py

@@ -1,53 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试Event loop修复效果
-"""
-import requests
-import json
-
-def test_fixed_api():
-    """测试修复后的API"""
-    print("🔍 测试修复后的API:")
-    print("=" * 40)
-    
-    # 测试用户提到的成功案例
-    print("根据用户反馈,对话列表API应该是正常工作的...")
-    print("但我的测试一直显示0个对话,让我们看看实际情况:")
-    
-    # 1. 测试对话列表
-    print("\n1. 对话列表API...")
-    try:
-        response = requests.get('http://localhost:8000/api/v0/react/users/doudou/conversations')
-        print(f"   状态: {response.status_code}")
-        
-        if response.status_code == 200:
-            data = response.json()
-            conversations = data.get("data", {}).get("conversations", [])
-            total_count = data.get("data", {}).get("total_count", 0)
-            success = data.get("success", False)
-            
-            print(f"   成功标志: {success}")
-            print(f"   对话数量: {len(conversations)}")
-            print(f"   total_count: {total_count}")
-            
-            if conversations:
-                print(f"   ✅ 找到对话!")
-                print(f"   首个对话: {conversations[0]['thread_id']}")
-                print(f"   对话预览: {conversations[0].get('conversation_preview', 'N/A')}")
-            else:
-                print(f"   ❌ 未找到对话(但用户说应该有1个对话)")
-        else:
-            print(f"   错误: {response.json()}")
-    except Exception as e:
-        print(f"   ❌ 请求失败: {e}")
-    
-    print("\n" + "=" * 40)
-    print("用户看到的结果:1个对话,包含preview等完整信息")
-    print("我看到的结果:0个对话")
-    print("可能的原因:服务器重启后Agent状态变化,或者我的测试时机有问题")
-    
-    # 先跳过对话详情测试,专注解决不一致问题
-    print("\n暂时跳过对话详情API测试,优先解决对话列表结果不一致的问题")
-
-if __name__ == "__main__":
-    test_fixed_api() 

+ 0 - 76
test/custom_react_agent/test/test_redis_simple.py

@@ -1,76 +0,0 @@
-#!/usr/bin/env python3
-"""
-超简单的Redis测试脚本
-"""
-import redis
-import json
-
-def test_redis_connection():
-    """测试Redis连接"""
-    print("🔗 测试Redis连接...")
-    
-    try:
-        # 创建Redis连接
-        r = redis.Redis(host='localhost', port=6379, decode_responses=True)
-        
-        # 测试连接
-        r.ping()
-        print("✅ Redis连接成功")
-        
-        # 扫描所有checkpoint keys
-        pattern = "checkpoint:*"
-        print(f"🔍 扫描所有checkpoint keys...")
-        
-        keys = []
-        cursor = 0
-        count = 0
-        
-        while True:
-            cursor, batch = r.scan(cursor=cursor, match=pattern, count=100)
-            keys.extend(batch)
-            count += len(batch)
-            print(f"   已扫描 {count} 个keys...")
-            if cursor == 0:
-                break
-            if count > 1000:  # 限制扫描数量
-                break
-        
-        print(f"📋 总共找到 {len(keys)} 个checkpoint keys")
-        
-        # 显示前几个key的格式
-        print("🔍 Key格式示例:")
-        for i, key in enumerate(keys[:5]):
-            print(f"   [{i+1}] {key}")
-        
-        # 查找doudou用户的keys
-        doudou_keys = [k for k in keys if k.startswith("checkpoint:doudou:")]
-        print(f"👤 doudou用户的keys: {len(doudou_keys)} 个")
-        
-        if doudou_keys:
-            print("📝 doudou的key示例:")
-            for i, key in enumerate(doudou_keys[:3]):
-                print(f"   [{i+1}] {key}")
-                
-                # 尝试获取数据
-                data = r.get(key)
-                if data:
-                    try:
-                        parsed = json.loads(data)
-                        print(f"       数据大小: {len(data)} 字符")
-                        print(f"       数据类型: {type(parsed)}")
-                        if isinstance(parsed, dict):
-                            print(f"       顶级keys: {list(parsed.keys())}")
-                    except Exception as e:
-                        print(f"       解析失败: {e}")
-        
-        r.close()
-        return True
-        
-    except Exception as e:
-        print(f"❌ Redis测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-if __name__ == "__main__":
-    test_redis_connection() 

+ 0 - 71
test/custom_react_agent/test/test_retry_logic.py

@@ -1,71 +0,0 @@
-"""
-测试修复后的重试逻辑
-"""
-import asyncio
-import sys
-import os
-
-# 添加路径
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(0, CURRENT_DIR)
-
-import config
-
-def test_error_classification():
-    """测试错误分类逻辑"""
-    print("🧪 测试错误分类逻辑")
-    
-    # 测试用例
-    test_cases = [
-        ("Request timed out.", True, "应该识别为网络错误"),
-        ("APITimeoutError: timeout", True, "应该识别为网络错误"),
-        ("Connection error occurred", True, "应该识别为网络错误"),
-        ("ReadTimeout exception", True, "应该识别为网络错误"),
-        ("ValueError: invalid input", False, "应该识别为非网络错误"),
-        ("KeyError: missing key", False, "应该识别为非网络错误"),
-    ]
-    
-    # 网络错误关键词(与agent.py中一致)
-    network_keywords = [
-        "Connection error", "APIConnectionError", "ConnectError", 
-        "timeout", "timed out", "TimeoutError", "APITimeoutError",
-        "ReadTimeout", "ConnectTimeout", "远程主机强迫关闭", "网络连接"
-    ]
-    
-    for error_msg, expected, description in test_cases:
-        is_network_error = any(keyword in error_msg for keyword in network_keywords)
-        status = "✅" if is_network_error == expected else "❌"
-        print(f"   {status} {description}")
-        print(f"      错误信息: '{error_msg}'")
-        print(f"      预期: {'网络错误' if expected else '非网络错误'}")
-        print(f"      实际: {'网络错误' if is_network_error else '非网络错误'}")
-        print()
-
-def test_retry_intervals():
-    """测试重试间隔计算"""
-    print("⏱️  测试重试间隔计算")
-    
-    base_delay = config.RETRY_BASE_DELAY  # 2秒
-    max_retries = config.MAX_RETRIES      # 5次
-    
-    print(f"   基础延迟: {base_delay}秒")
-    print(f"   最大重试: {max_retries}次")
-    print()
-    
-    total_wait_time = 0
-    for attempt in range(max_retries - 1):  # 不包括最后一次(不会重试)
-        # 新的计算公式:wait_time = base_delay * (2 ** attempt) + attempt
-        wait_time = base_delay * (2 ** attempt) + attempt
-        total_wait_time += wait_time
-        print(f"   第{attempt + 1}次失败后等待: {wait_time}秒")
-    
-    print(f"\n   总等待时间: {total_wait_time}秒")
-    print(f"   加上LLM超时({config.NETWORK_TIMEOUT}秒 x {max_retries}次): {config.NETWORK_TIMEOUT * max_retries}秒")
-    print(f"   最大总耗时: {total_wait_time + config.NETWORK_TIMEOUT * max_retries}秒")
-
-if __name__ == "__main__":
-    print("🔧 测试修复后的重试机制\n")
-    test_error_classification()
-    print("=" * 50)
-    test_retry_intervals()
-    print("\n✅ 测试完成")

+ 0 - 81
test/custom_react_agent/test/test_shell_features.py

@@ -1,81 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试 shell.py 新增的对话选择功能
-"""
-import asyncio
-import sys
-import os
-
-# 确保导入路径正确
-CURRENT_DIR = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(0, CURRENT_DIR)
-
-from shell import CustomAgentShell
-
-async def test_conversation_selection():
-    """测试对话选择功能"""
-    print("🧪 测试对话选择功能...")
-    
-    try:
-        # 创建shell实例
-        shell = await CustomAgentShell.create()
-        print("✅ Shell创建成功!")
-        
-        # 设置测试数据
-        shell.user_id = 'test_user'
-        shell.recent_conversations = [
-            {
-                'thread_id': 'test_user:20250101120000001', 
-                'conversation_preview': 'Python编程问题',
-                'timestamp': '20250101120000001',
-                'formatted_time': '2025-01-01 12:00:00'
-            },
-            {
-                'thread_id': 'test_user:20250101130000001', 
-                'conversation_preview': 'SQL查询帮助',
-                'timestamp': '20250101130000001',
-                'formatted_time': '2025-01-01 13:00:00'
-            },
-        ]
-        
-        print("\n📋 测试对话选择解析:")
-        
-        # 测试不同的选择类型
-        test_cases = [
-            ('1', '数字序号选择'),
-            ('test_user:20250101120000001', 'Thread ID选择'),
-            ('2025-01-01', '日期选择'),
-            ('new', '新对话命令'),
-            ('What is Python?', '新问题'),
-            ('999', '无效序号'),
-            ('wrong_user:20250101120000001', '无效Thread ID'),
-            ('2025-12-31', '无效日期'),
-        ]
-        
-        for user_input, description in test_cases:
-            result = shell._parse_conversation_selection(user_input)
-            print(f"   输入: '{user_input}' ({description})")
-            print(f"   结果: {result['type']}")
-            if 'message' in result:
-                print(f"   消息: {result['message']}")
-            elif 'thread_id' in result:
-                print(f"   Thread ID: {result['thread_id']}")
-            print()
-        
-        print("📄 测试对话列表显示:")
-        shell._display_conversation_list(shell.recent_conversations)
-        
-        # 测试获取对话功能(这个需要真实的Agent连接)
-        print("\n🔍 测试获取对话功能:")
-        print("   (需要Redis和Agent连接,此处跳过)")
-        
-        await shell.close()
-        print("✅ 所有测试完成!")
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-
-if __name__ == "__main__":
-    asyncio.run(test_conversation_selection()) 

+ 0 - 26
test/custom_react_agent/test/test_simple_api.py

@@ -1,26 +0,0 @@
-import requests
-import json
-import time
-
-def test_api():
-    base_url = "http://localhost:5000"
-    
-    # 测试简单同步版本
-    print("=== 测试简单同步版本 ===")
-    try:
-        response = requests.get(f"{base_url}/api/test/users/wang/conversations?limit=5")
-        print(f"状态码: {response.status_code}")
-        print(f"响应: {json.dumps(response.json(), indent=2, ensure_ascii=False)}")
-    except Exception as e:
-        print(f"测试失败: {e}")
-    
-    print("\n=== 测试标准版本 ===")
-    try:
-        response = requests.get(f"{base_url}/api/v0/react/users/wang/conversations?limit=5")
-        print(f"状态码: {response.status_code}")
-        print(f"响应: {json.dumps(response.json(), indent=2, ensure_ascii=False)}")
-    except Exception as e:
-        print(f"测试失败: {e}")
-
-if __name__ == "__main__":
-    test_api() 

+ 0 - 173
test/custom_react_agent/test/test_valid_sql_simple.py

@@ -1,173 +0,0 @@
-#!/usr/bin/env python3
-"""
-简化版 valid_sql 测试脚本
-只测试三种错误场景:table不存在、column不存在、语法错误
-"""
-import asyncio
-import logging
-
-# 配置日志
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-# 导入必要的模块
-try:
-    from agent import CustomReactAgent
-    from sql_tools import valid_sql
-    from langchain_core.messages import HumanMessage, ToolMessage, SystemMessage
-except ImportError as e:
-    logger.error(f"导入失败: {e}")
-    logger.info("请确保在正确的目录下运行此脚本")
-    exit(1)
-
-class SimpleValidSqlTester:
-    """简化版 valid_sql 测试类"""
-    
-    def __init__(self):
-        self.agent = None
-    
-    async def setup(self):
-        """初始化 Agent"""
-        logger.info("🚀 初始化 CustomReactAgent...")
-        try:
-            self.agent = await CustomReactAgent.create()
-            logger.info("✅ Agent 初始化完成")
-        except Exception as e:
-            logger.error(f"❌ Agent 初始化失败: {e}")
-            raise
-    
-    async def cleanup(self):
-        """清理资源"""
-        if self.agent:
-            await self.agent.close()
-            logger.info("✅ Agent 资源已清理")
-    
-    def test_valid_sql_direct(self, sql: str) -> str:
-        """直接测试 valid_sql 工具"""
-        logger.info(f"🔧 直接测试 valid_sql 工具")
-        logger.info(f"SQL: {sql}")
-        
-        result = valid_sql.invoke(sql)
-        logger.info(f"结果: {result}")
-        return result
-    
-    async def test_llm_response_to_error(self, question: str, error_sql: str, error_message: str):
-        """测试 LLM 对验证错误的响应"""
-        logger.info(f"🧠 测试 LLM 对验证错误的响应")
-        logger.info(f"问题: {question}")
-        logger.info(f"错误SQL: {error_sql}")
-        logger.info(f"错误信息: {error_message}")
-        
-        # 创建模拟的 state
-        state = {
-            "thread_id": "test_thread",
-            "messages": [
-                HumanMessage(content=question),
-                ToolMessage(
-                    content=error_sql,
-                    name="generate_sql",
-                    tool_call_id="test_call_1"
-                ),
-                ToolMessage(
-                    content=error_message,
-                    name="valid_sql", 
-                    tool_call_id="test_call_2"
-                )
-            ],
-            "suggested_next_step": "analyze_validation_error"
-        }
-        
-        try:
-            # 调用 Agent 的内部方法来测试处理逻辑
-            messages_for_llm = list(state["messages"])
-            
-            # 添加验证错误指导
-            error_guidance = self.agent._generate_validation_error_guidance(error_message)
-            messages_for_llm.append(SystemMessage(content=error_guidance))
-            
-            logger.info(f"📝 添加的错误指导: {error_guidance}")
-            
-            # 调用 LLM 看如何处理
-            response = await self.agent.llm_with_tools.ainvoke(messages_for_llm)
-            logger.info(f"🤖 LLM 响应: {response.content}")
-            
-            return response
-            
-        except Exception as e:
-            logger.error(f"❌ 测试失败: {e}")
-            return None
-
-async def test_three_scenarios():
-    """测试三种错误场景"""
-    logger.info("🧪 测试三种 valid_sql 错误场景")
-    
-    # 三种测试用例
-    test_cases = [
-        # {
-        #     "name": "表不存在",
-        #     "question": "查询员工表的信息",
-        #     "sql": "SELECT * FROM non_existent_table LIMIT 1"
-        # },
-        # {
-        #     "name": "字段不存在", 
-        #     "question": "查询每个服务区的经理姓名",
-        #     "sql": "SELECT non_existent_field FROM bss_business_day_data LIMIT 1"
-        # },
-        {
-            "name": "语法错误",
-            "question": "查询服务区数据 WHERE",
-            "sql": "SELECT service_name, pay_sum FROM bss_business_day_data WHERE service_name = '庐山服务区' AS service_alias"
-        }
-    ]
-    
-    tester = SimpleValidSqlTester()
-    
-    try:
-        await tester.setup()
-        
-        for i, test_case in enumerate(test_cases, 1):
-            logger.info(f"\n{'='*50}")
-            logger.info(f"测试用例 {i}: {test_case['name']}")
-            logger.info(f"{'='*50}")
-            
-            # 1. 直接测试 valid_sql
-            direct_result = tester.test_valid_sql_direct(test_case["sql"])
-            
-            # 2. 测试 LLM 响应
-            llm_response = await tester.test_llm_response_to_error(
-                test_case["question"], 
-                test_case["sql"], 
-                direct_result
-            )
-            
-            # 简单的结果分析
-            logger.info(f"\n📊 结果分析:")
-            if "失败" in direct_result:
-                logger.info("✅ valid_sql 正确捕获错误")
-            else:
-                logger.warning("⚠️ valid_sql 可能未正确捕获错误")
-            
-            if llm_response and ("错误" in llm_response.content or "失败" in llm_response.content):
-                logger.info("✅ LLM 正确处理验证错误")
-            else:
-                logger.warning("⚠️ LLM 可能未正确处理验证错误")
-        
-    except Exception as e:
-        logger.error(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-    
-    finally:
-        await tester.cleanup()
-
-async def main():
-    """主函数"""
-    logger.info("🚀 简化版 valid_sql 测试")
-    await test_three_scenarios()
-    logger.info("\n✅ 测试完成")
-
-if __name__ == "__main__":
-    asyncio.run(main()) 
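Note: the guidance string appended as a SystemMessage above comes from CustomReactAgent._generate_validation_error_guidance(), whose implementation is not part of this diff. A minimal, hypothetical sketch of such a helper (names and wording are assumptions, shown only to make the tested flow concrete):

def generate_validation_error_guidance(error_message: str) -> str:
    # Hypothetical stand-in for CustomReactAgent._generate_validation_error_guidance().
    # It maps the valid_sql failure text onto a corrective hint for the LLM.
    if "表不存在" in error_message:
        hint = "The referenced table does not exist; switch to a known table or explain this to the user."
    elif "字段不存在" in error_message:
        hint = "The referenced column does not exist; check the column name before regenerating the SQL."
    elif "语法错误" in error_message:
        hint = "Fix the SQL syntax and call generate_sql / valid_sql again."
    else:
        hint = "Correct the SQL based on the error, or explain the failure to the user."
    return f"SQL validation failed: {error_message}\n{hint}"

The test above only checks that the LLM's reply mentions the error, so any guidance of this general shape would satisfy it.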

+ 0 - 179
test/custom_react_agent/test/test_valid_sql_standalone.py

@@ -1,179 +0,0 @@
-#!/usr/bin/env python3
-"""
-独立测试 valid_sql 错误处理流程
-不修改任何现有代码,只模拟测试场景
-"""
-import asyncio
-import logging
-import json
-
-# 配置日志
-logging.basicConfig(
-    level=logging.INFO,
-    format='%(asctime)s - %(levelname)s - %(message)s'
-)
-logger = logging.getLogger(__name__)
-
-class MockValidSqlTool:
-    """模拟 valid_sql 工具的行为"""
-    
-    @staticmethod
-    def valid_sql(sql: str) -> str:
-        """模拟 valid_sql 工具的验证逻辑"""
-        logger.info(f"🔧 [Mock Tool] valid_sql - 待验证SQL: {sql}")
-        
-        # 模拟语法错误检测
-        if "AS service_alias" in sql and "WHERE" in sql:
-            logger.warning("   SQL验证失败:语法错误 - WHERE子句后不能直接使用AS别名")
-            return "SQL验证失败:语法错误。详细错误:syntax error at or near \"AS\""
-        
-        # 模拟表不存在检测
-        if "non_existent_table" in sql:
-            logger.warning("   SQL验证失败:表不存在")
-            return "SQL验证失败:表不存在。详细错误:relation \"non_existent_table\" does not exist"
-        
-        # 模拟字段不存在检测
-        if "non_existent_field" in sql:
-            logger.warning("   SQL验证失败:字段不存在")
-            return "SQL验证失败:字段不存在。详细错误:column \"non_existent_field\" does not exist"
-        
-        logger.info("   ✅ SQL验证通过")
-        return "SQL验证通过:语法正确且字段存在"
-
-class MockLLM:
-    """模拟 LLM 的响应行为"""
-    
-    @staticmethod
-    async def respond_to_validation_error(question: str, error_sql: str, error_message: str) -> str:
-        """模拟 LLM 对验证错误的响应"""
-        logger.info(f"🧠 [Mock LLM] 处理验证错误")
-        logger.info(f"问题: {question}")
-        logger.info(f"错误SQL: {error_sql}")
-        logger.info(f"错误信息: {error_message}")
-        
-        # 模拟不同类型的错误处理
-        if "语法错误" in error_message:
-            if "AS service_alias" in error_sql:
-                response = """我发现了SQL语法错误。在WHERE子句后不能直接使用AS别名。
-
-正确的SQL应该是:
-```sql
-SELECT service_name, pay_sum FROM bss_business_day_data WHERE service_name = '庐山服务区'
-```
-
-或者如果需要别名,应该这样写:
-```sql
-SELECT service_name AS service_alias, pay_sum FROM bss_business_day_data WHERE service_name = '庐山服务区'
-```
-
-问题在于AS别名应该在SELECT子句中定义,而不是在WHERE子句后。"""
-        elif "表不存在" in error_message:
-            response = """抱歉,您查询的表不存在。根据我的了解,系统中没有名为"non_existent_table"的表。
-
-可用的表包括:
-- bss_business_day_data (业务日数据表)
-- bss_car_day_count (车辆日统计表)
-- bss_company (公司信息表)
-
-请确认您要查询的表名是否正确。"""
-        elif "字段不存在" in error_message:
-            response = """抱歉,您查询的字段不存在。根据我的了解,bss_business_day_data表中没有名为"non_existent_field"的字段。
-
-该表的主要字段包括:
-- service_name (服务区名称)
-- pay_sum (支付金额)
-- business_date (业务日期)
-
-请确认您要查询的字段名是否正确。"""
-        else:
-            response = f"SQL验证失败:{error_message}。请检查SQL语句的语法和字段名称。"
-        
-        logger.info(f"🤖 [Mock LLM] 响应: {response[:100]}...")
-        return response
-
-class StandaloneValidSqlTester:
-    """独立的 valid_sql 测试类"""
-    
-    def __init__(self):
-        self.mock_valid_sql = MockValidSqlTool()
-        self.mock_llm = MockLLM()
-    
-    def test_valid_sql_direct(self, sql: str) -> str:
-        """直接测试 valid_sql 工具"""
-        logger.info(f"🔧 直接测试 valid_sql 工具")
-        logger.info(f"SQL: {sql}")
-        
-        result = self.mock_valid_sql.valid_sql(sql)
-        logger.info(f"结果: {result}")
-        return result
-    
-    async def test_llm_response_to_error(self, question: str, error_sql: str, error_message: str):
-        """测试 LLM 对验证错误的响应"""
-        logger.info(f"🧠 测试 LLM 对验证错误的响应")
-        
-        response = await self.mock_llm.respond_to_validation_error(question, error_sql, error_message)
-        return response
-
-async def test_three_scenarios():
-    """测试三种错误场景"""
-    logger.info("🧪 测试三种 valid_sql 错误场景")
-    
-    # 三种测试用例
-    test_cases = [
-        {
-            "name": "表不存在",
-            "question": "查询员工表的信息",
-            "sql": "SELECT * FROM non_existent_table LIMIT 1"
-        },
-        {
-            "name": "字段不存在", 
-            "question": "查询每个服务区的经理姓名",
-            "sql": "SELECT non_existent_field FROM bss_business_day_data LIMIT 1"
-        },
-        {
-            "name": "语法错误",
-            "question": "查询服务区数据 WHERE",
-            "sql": "SELECT service_name, pay_sum FROM bss_business_day_data WHERE service_name = '庐山服务区' AS service_alias"
-        }
-    ]
-    
-    tester = StandaloneValidSqlTester()
-    
-    for i, test_case in enumerate(test_cases, 1):
-        logger.info(f"\n{'='*50}")
-        logger.info(f"测试用例 {i}: {test_case['name']}")
-        logger.info(f"{'='*50}")
-        
-        # 1. 直接测试 valid_sql
-        direct_result = tester.test_valid_sql_direct(test_case["sql"])
-        
-        # 2. 测试 LLM 响应
-        llm_response = await tester.test_llm_response_to_error(
-            test_case["question"], 
-            test_case["sql"], 
-            direct_result
-        )
-        
-        # 简单的结果分析
-        logger.info(f"\n📊 结果分析:")
-        if "失败" in direct_result:
-            logger.info("✅ valid_sql 正确捕获错误")
-        else:
-            logger.warning("⚠️ valid_sql 可能未正确捕获错误")
-        
-        if llm_response and ("错误" in llm_response or "抱歉" in llm_response or "SQL" in llm_response):
-            logger.info("✅ LLM 正确处理验证错误")
-        else:
-            logger.warning("⚠️ LLM 可能未正确处理验证错误")
-        
-        logger.info(f"\n📝 LLM 完整响应:")
-        logger.info(llm_response)
-
-async def main():
-    """主函数"""
-    logger.info("🚀 独立 valid_sql 测试")
-    await test_three_scenarios()
-    logger.info("\n✅ 测试完成")
-
-if __name__ == "__main__":
-    asyncio.run(main()) 
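The MockValidSqlTool above reproduces PostgreSQL-style error messages ("relation ... does not exist", "column ... does not exist", syntax errors). A real valid_sql could obtain the same messages by running EXPLAIN against the database; a minimal sketch, assuming a psycopg2 connection and a conn_params dict that are not part of this repository:

import psycopg2

def valid_sql_via_explain(sql: str, conn_params: dict) -> str:
    """Sketch: let the PostgreSQL planner check syntax, tables and columns without executing the query."""
    try:
        with psycopg2.connect(**conn_params) as conn:
            with conn.cursor() as cur:
                cur.execute("EXPLAIN " + sql)  # planner errors surface syntax/table/column problems
        return "SQL验证通过:语法正确且字段存在"
    except psycopg2.Error as e:
        return f"SQL验证失败:{e.pgerror or e}"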

+ 0 - 209
test/custom_react_agent/test/test_validation_improvements.py

@@ -1,209 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试 API 参数验证和错误处理改进
-验证JSON格式错误处理和用户ID一致性校验
-"""
-
-import requests
-import json
-
-def test_validation_improvements():
-    """测试参数验证改进"""
-    
-    api_url = "http://localhost:8000/api/chat"
-    
-    print("🧪 开始测试 API 参数验证改进...")
-    print("=" * 80)
-    
-    # 测试用例1: JSON格式错误 - 尾随逗号
-    print(f"\n📋 测试用例1: JSON格式错误(尾随逗号)")
-    malformed_json = '{ "question": "测试问题", "user_id": "wang01", "thread_id": "wang01:20250714102158117", }'
-    try:
-        response = requests.post(
-            api_url,
-            data=malformed_json,  # 使用data而不是json,模拟原始JSON字符串
-            headers={"Content-Type": "application/json"},
-            timeout=10
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        result = response.json()
-        print(f"📝 响应内容: {json.dumps(result, ensure_ascii=False, indent=2)}")
-        
-        # 验证是否是400错误且有明确的错误信息
-        if response.status_code == 400 and "JSON格式" in result.get("error", ""):
-            print("✅ JSON格式错误处理正确")
-        else:
-            print("❌ JSON格式错误处理有问题")
-            
-    except Exception as e:
-        print(f"❌ 测试JSON格式错误失败: {e}")
-    
-    # 测试用例2: 用户ID不一致 - thread_id
-    print(f"\n📋 测试用例2: 用户ID不一致(thread_id)")
-    test_case_2 = {
-        "question": "测试用户ID不一致",
-        "user_id": "alice",
-        "thread_id": "bob:20250714120000001"  # 用户ID不匹配
-    }
-    try:
-        response = requests.post(
-            api_url,
-            json=test_case_2,
-            headers={"Content-Type": "application/json"},
-            timeout=10
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        result = response.json()
-        print(f"📝 响应内容: {json.dumps(result, ensure_ascii=False, indent=2)}")
-        
-        # 验证是否正确检测到用户ID不一致
-        if response.status_code == 400 and "会话归属验证失败" in result.get("error", ""):
-            print("✅ 用户ID一致性校验正确")
-        else:
-            print("❌ 用户ID一致性校验有问题")
-            
-    except Exception as e:
-        print(f"❌ 测试用户ID一致性失败: {e}")
-    
-    # 测试用例3: 用户ID不一致 - conversation_id
-    print(f"\n📋 测试用例3: 用户ID不一致(conversation_id)")
-    test_case_3 = {
-        "question": "测试conversation_id用户ID不一致",
-        "user_id": "charlie",
-        "conversation_id": "david:20250714120000002"  # 用户ID不匹配
-    }
-    try:
-        response = requests.post(
-            api_url,
-            json=test_case_3,
-            headers={"Content-Type": "application/json"},
-            timeout=10
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        result = response.json()
-        print(f"📝 响应内容: {json.dumps(result, ensure_ascii=False, indent=2)}")
-        
-        if response.status_code == 400 and "会话归属验证失败" in result.get("error", ""):
-            print("✅ conversation_id用户ID一致性校验正确")
-        else:
-            print("❌ conversation_id用户ID一致性校验有问题")
-            
-    except Exception as e:
-        print(f"❌ 测试conversation_id用户ID一致性失败: {e}")
-    
-    # 测试用例4: 会话ID格式错误
-    print(f"\n📋 测试用例4: 会话ID格式错误(缺少冒号)")
-    test_case_4 = {
-        "question": "测试会话ID格式错误",
-        "user_id": "eve",
-        "thread_id": "eve20250714120000003"  # 缺少冒号
-    }
-    try:
-        response = requests.post(
-            api_url,
-            json=test_case_4,
-            headers={"Content-Type": "application/json"},
-            timeout=10
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        result = response.json()
-        print(f"📝 响应内容: {json.dumps(result, ensure_ascii=False, indent=2)}")
-        
-        if response.status_code == 400 and "会话ID格式无效" in result.get("error", ""):
-            print("✅ 会话ID格式校验正确")
-        else:
-            print("❌ 会话ID格式校验有问题")
-            
-    except Exception as e:
-        print(f"❌ 测试会话ID格式错误失败: {e}")
-    
-    # 测试用例5: 正常情况 - 验证修改不影响正常流程
-    print(f"\n📋 测试用例5: 正常情况(验证修改不影响正常流程)")
-    test_case_5 = {
-        "question": "这是一个正常的测试问题",
-        "user_id": "frank",
-        "thread_id": "frank:20250714120000005"
-    }
-    try:
-        response = requests.post(
-            api_url,
-            json=test_case_5,
-            headers={"Content-Type": "application/json"},
-            timeout=30
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        
-        if response.status_code == 200:
-            result = response.json()
-            print("✅ 正常请求处理成功")
-            print(f"   - conversation_id: {result.get('data', {}).get('conversation_id')}")
-            print(f"   - user_id: {result.get('data', {}).get('user_id')}")
-        else:
-            print(f"❌ 正常请求处理失败: {response.text}")
-            
-    except Exception as e:
-        print(f"❌ 测试正常情况失败: {e}")
-    
-    # 测试用例6: guest用户不受限制
-    print(f"\n📋 测试用例6: guest用户不受会话ID限制")
-    test_case_6 = {
-        "question": "guest用户测试",
-        "user_id": "guest",
-        "thread_id": "someuser:20250714120000006"  # guest用户应该不受限制
-    }
-    try:
-        response = requests.post(
-            api_url,
-            json=test_case_6,
-            headers={"Content-Type": "application/json"},
-            timeout=30
-        )
-        print(f"📊 响应状态码: {response.status_code}")
-        
-        if response.status_code == 200:
-            print("✅ guest用户不受会话ID限制,处理正确")
-        else:
-            result = response.json()
-            print(f"❌ guest用户处理有问题: {result}")
-            
-    except Exception as e:
-        print(f"❌ 测试guest用户失败: {e}")
-    
-    print("\n" + "=" * 80)
-    print("🎯 测试完成!")
-    print("\n💡 预期结果总结:")
-    print("1. JSON格式错误应该返回400错误,明确指出JSON格式问题")
-    print("2. 用户ID与thread_id/conversation_id不一致应该返回400错误")
-    print("3. 会话ID格式错误应该返回400错误")
-    print("4. 正常请求应该不受影响")
-    print("5. guest用户不受会话ID限制")
-
-def test_edge_cases():
-    """测试边界情况"""
-    
-    api_url = "http://localhost:8000/api/chat"
-    
-    print("\n🔍 测试边界情况...")
-    print("-" * 60)
-    
-    # 边界情况1: 复杂的会话ID格式
-    test_edge_1 = {
-        "question": "测试复杂会话ID",
-        "user_id": "user:with:colons",
-        "thread_id": "user:with:colons:20250714120000001:extra"
-    }
-    
-    try:
-        response = requests.post(api_url, json=test_edge_1, timeout=10)
-        print(f"🔬 复杂会话ID测试 - 状态码: {response.status_code}")
-        if response.status_code == 200:
-            print("✅ 复杂会话ID处理正确")
-        else:
-            result = response.json()
-            print(f"📝 错误信息: {result.get('error', '')}")
-    except Exception as e:
-        print(f"❌ 复杂会话ID测试失败: {e}")
-
-if __name__ == "__main__":
-    test_validation_improvements()
-    test_edge_cases()
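Taken together, these cases imply a thread_id/conversation_id of the form "<user_id>:<timestamp>", with guest users exempt from the ownership check. A minimal sketch of the validation the tests expect (hypothetical; the real check lives in the API layer, which is not shown in this diff):

def check_session_ownership(user_id: str, session_id: str) -> tuple[bool, str]:
    # Sketch of the behaviour asserted above; error texts mirror the expected API messages.
    if user_id == "guest":                        # test case 6: guest users are not restricted
        return True, ""
    if ":" not in session_id:                     # test case 4: missing colon
        return False, "会话ID格式无效"
    if not session_id.startswith(user_id + ":"):  # test cases 2/3: owner prefix must match
        return False, "会话归属验证失败"
    return True, ""

Matching on the "user_id:" prefix also keeps the edge case working where the user_id itself contains colons ("user:with:colons").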

+ 0 - 255
test/redis_conversation_demo.py

@@ -1,255 +0,0 @@
-"""
-Redis对话管理功能演示脚本
-
-这个脚本演示了如何使用Redis对话管理系统的各种功能:
-1. 创建对话
-2. 多轮对话(带上下文)
-3. 缓存命中
-4. 对话历史查询
-5. 统计信息查看
-"""
-
-import requests
-import json
-import time
-import sys
-import os
-
-class ConversationDemo:
-    def __init__(self, base_url="http://localhost:8084/api/v0"):
-        self.base_url = base_url
-        self.session_id = f"demo_session_{int(time.time())}"
-        self.conversation_id = None
-        self.user_id = None
-    
-    def print_section(self, title):
-        """打印分隔线"""
-        print("\n" + "="*60)
-        print(f" {title} ")
-        print("="*60)
-    
-    def demo_basic_conversation(self):
-        """演示基本对话功能"""
-        self.print_section("1. 基本对话功能")
-        
-        # 第一个问题
-        print("\n[DEMO] 发送第一个问题...")
-        response = requests.post(
-            f"{self.base_url}/ask_agent",
-            json={
-                "question": "高速公路服务区有多少个?",
-                "session_id": self.session_id
-            }
-        )
-        
-        if response.status_code == 200:
-            data = response.json()
-            self.conversation_id = data['data']['conversation_id']
-            self.user_id = data['data']['user_id']
-            
-            print(f"[结果] 对话ID: {self.conversation_id}")
-            print(f"[结果] 用户ID: {self.user_id}")
-            print(f"[结果] 是否为Guest用户: {data['data']['user_id'] == 'guest'}")
-            print(f"[结果] 回答: {data['data'].get('response', '')[:100]}...")
-        else:
-            print(f"[错误] 响应码: {response.status_code}")
-    
-    def demo_context_awareness(self):
-        """演示上下文感知功能"""
-        self.print_section("2. 上下文感知功能")
-        
-        if not self.conversation_id:
-            print("[警告] 需要先运行基本对话演示")
-            return
-        
-        # 第二个问题(依赖上下文)
-        print("\n[DEMO] 发送依赖上下文的问题...")
-        response = requests.post(
-            f"{self.base_url}/ask_agent",
-            json={
-                "question": "这些服务区的经理都是谁?",
-                "session_id": self.session_id,
-                "conversation_id": self.conversation_id
-            }
-        )
-        
-        if response.status_code == 200:
-            data = response.json()
-            print(f"[结果] 使用了上下文: {data['data'].get('context_used')}")
-            print(f"[结果] 对话状态: {data['data'].get('conversation_status')}")
-            print(f"[结果] 回答: {data['data'].get('response', '')[:100]}...")
-        else:
-            print(f"[错误] 响应码: {response.status_code}")
-    
-    def demo_cache_functionality(self):
-        """演示缓存功能"""
-        self.print_section("3. 缓存功能")
-        
-        # 问相同的问题
-        question = "高速公路服务区的总数是多少?"
-        
-        print(f"\n[DEMO] 第一次询问: {question}")
-        response1 = requests.post(
-            f"{self.base_url}/ask_agent",
-            json={
-                "question": question,
-                "session_id": self.session_id + "_cache",
-            }
-        )
-        
-        if response1.status_code == 200:
-            data1 = response1.json()
-            print(f"[结果] 来自缓存: {data1['data'].get('from_cache')}")
-            conv_id = data1['data']['conversation_id']
-            
-            # 立即再问一次
-            print(f"\n[DEMO] 第二次询问相同问题...")
-            response2 = requests.post(
-                f"{self.base_url}/ask_agent",
-                json={
-                    "question": question,
-                    "session_id": self.session_id + "_cache",
-                    "conversation_id": conv_id
-                }
-            )
-            
-            if response2.status_code == 200:
-                data2 = response2.json()
-                print(f"[结果] 来自缓存: {data2['data'].get('from_cache')}")
-    
-    def demo_conversation_history(self):
-        """演示对话历史查询"""
-        self.print_section("4. 对话历史查询")
-        
-        if not self.user_id:
-            print("[警告] 需要先运行基本对话演示")
-            return
-        
-        # 获取用户的对话列表
-        print(f"\n[DEMO] 获取用户 {self.user_id} 的对话列表...")
-        response = requests.get(
-            f"{self.base_url}/user/{self.user_id}/conversations"
-        )
-        
-        if response.status_code == 200:
-            data = response.json()
-            conversations = data['data']['conversations']
-            print(f"[结果] 找到 {len(conversations)} 个对话")
-            
-            for i, conv in enumerate(conversations):
-                print(f"\n  对话 {i+1}:")
-                print(f"    ID: {conv['conversation_id']}")
-                print(f"    创建时间: {conv['created_at']}")
-                print(f"    消息数: {conv['message_count']}")
-        
-        # 获取特定对话的消息
-        if self.conversation_id:
-            print(f"\n[DEMO] 获取对话 {self.conversation_id} 的消息...")
-            response = requests.get(
-                f"{self.base_url}/conversation/{self.conversation_id}/messages"
-            )
-            
-            if response.status_code == 200:
-                data = response.json()
-                messages = data['data']['messages']
-                print(f"[结果] 找到 {len(messages)} 条消息")
-                
-                for msg in messages:
-                    role = "用户" if msg['role'] == 'user' else "助手"
-                    content = msg['content'][:50] + "..." if len(msg['content']) > 50 else msg['content']
-                    print(f"\n  [{role}]: {content}")
-    
-    def demo_statistics(self):
-        """演示统计信息"""
-        self.print_section("5. 统计信息")
-        
-        print("\n[DEMO] 获取对话系统统计信息...")
-        response = requests.get(f"{self.base_url}/conversation_stats")
-        
-        if response.status_code == 200:
-            data = response.json()
-            stats = data['data']
-            
-            print(f"\n[统计信息]")
-            print(f"  Redis可用: {stats.get('available')}")
-            print(f"  总用户数: {stats.get('total_users')}")
-            print(f"  总对话数: {stats.get('total_conversations')}")
-            print(f"  缓存的问答数: {stats.get('cached_qa_count')}")
-            
-            if stats.get('redis_info'):
-                print(f"\n[Redis信息]")
-                print(f"  内存使用: {stats['redis_info'].get('used_memory')}")
-                print(f"  连接客户端数: {stats['redis_info'].get('connected_clients')}")
-    
-    def demo_invalid_conversation_id(self):
-        """演示无效对话ID处理"""
-        self.print_section("6. 无效对话ID处理")
-        
-        print("\n[DEMO] 使用无效的对话ID...")
-        response = requests.post(
-            f"{self.base_url}/ask_agent",
-            json={
-                "question": "测试无效ID",
-                "session_id": self.session_id,
-                "conversation_id": "invalid_conversation_xyz"
-            }
-        )
-        
-        if response.status_code == 200:
-            data = response.json()
-            print(f"[结果] 对话状态: {data['data'].get('conversation_status')}")
-            # 根据状态显示对应消息(本地化处理)
-            status = data['data'].get('conversation_status')
-            status_messages = {
-                'new': '创建新对话',
-                'continue': '继续已有对话', 
-                'invalid_id_new': '您请求的对话不存在或无权访问,已为您创建新对话'
-            }
-            print(f"[结果] 状态消息: {status_messages.get(status, '未知状态')}")
-            print(f"[结果] 请求的ID: {data['data'].get('requested_conversation_id')}")
-            print(f"[结果] 新创建的ID: {data['data'].get('conversation_id')}")
-    
-    def run_all_demos(self):
-        """运行所有演示"""
-        try:
-            # 检查服务是否可用
-            print("[DEMO] 检查服务可用性...")
-            response = requests.get(f"{self.base_url}/agent_health", timeout=5)
-            if response.status_code != 200:
-                print("[错误] 服务不可用,请先启动Flask应用")
-                return
-            
-            # 运行各个演示
-            self.demo_basic_conversation()
-            time.sleep(1)
-            
-            self.demo_context_awareness()
-            time.sleep(1)
-            
-            self.demo_cache_functionality()
-            time.sleep(1)
-            
-            self.demo_conversation_history()
-            time.sleep(1)
-            
-            self.demo_statistics()
-            time.sleep(1)
-            
-            self.demo_invalid_conversation_id()
-            
-            print("\n" + "="*60)
-            print(" 演示完成 ")
-            print("="*60)
-            
-        except Exception as e:
-            print(f"\n[错误] 演示过程中出错: {str(e)}")
-            print("请确保Flask应用正在运行 (python citu_app.py)")
-
-
-if __name__ == "__main__":
-    print("Redis对话管理功能演示")
-    print("确保已经启动了Flask应用和Redis服务")
-    print("-" * 60)
-    
-    demo = ConversationDemo()
-    demo.run_all_demos() 

+ 0 - 293
test/test_ask_agent_redis_integration.py

@@ -1,293 +0,0 @@
-import unittest
-import requests
-import json
-import sys
-import os
-import time
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from common.redis_conversation_manager import RedisConversationManager
-
-class TestAskAgentRedisIntegration(unittest.TestCase):
-    """ask_agent API的Redis集成测试"""
-    
-    def setUp(self):
-        """测试前准备"""
-        self.base_url = "http://localhost:8084/api/v0"
-        self.test_session_id = "test_session_" + str(int(time.time()))
-        self.manager = RedisConversationManager()
-        
-    def tearDown(self):
-        """测试后清理"""
-        # 清理测试数据
-        pass
-    
-    def test_api_availability(self):
-        """测试API可用性"""
-        try:
-            response = requests.get(f"{self.base_url}/agent_health", timeout=5)
-            print(f"[TEST] Agent健康检查响应码: {response.status_code}")
-        except Exception as e:
-            self.skipTest(f"API服务不可用: {str(e)}")
-    
-    def test_basic_ask_agent(self):
-        """测试基本的ask_agent调用"""
-        try:
-            # 第一次调用 - 创建新对话
-            payload = {
-                "question": "测试问题:高速公路服务区有多少个?",
-                "session_id": self.test_session_id
-            }
-            
-            response = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload,
-                timeout=30
-            )
-            
-            print(f"[TEST] 第一次调用响应码: {response.status_code}")
-            
-            if response.status_code == 200:
-                data = response.json()
-                print(f"[TEST] 响应数据: {json.dumps(data, indent=2, ensure_ascii=False)}")
-                
-                # 验证返回字段
-                self.assertIn('data', data)
-                self.assertIn('conversation_id', data['data'])
-                self.assertIn('user_id', data['data'])
-                self.assertIn('conversation_status', data['data'])
-                
-                conversation_id = data['data']['conversation_id']
-                user_id = data['data']['user_id']
-                
-                print(f"[TEST] 创建的对话ID: {conversation_id}")
-                print(f"[TEST] 用户ID: {user_id}")
-                
-                return conversation_id, user_id
-                
-        except Exception as e:
-            self.skipTest(f"API调用失败: {str(e)}")
-    
-    def test_conversation_context(self):
-        """测试对话上下文功能"""
-        try:
-            # 第一次调用
-            payload1 = {
-                "question": "高速公路服务区有多少个?",
-                "session_id": self.test_session_id
-            }
-            
-            response1 = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload1,
-                timeout=30
-            )
-            
-            if response1.status_code != 200:
-                self.skipTest("第一次API调用失败")
-            
-            data1 = response1.json()
-            conversation_id = data1['data']['conversation_id']
-            
-            # 第二次调用 - 使用相同的对话ID
-            payload2 = {
-                "question": "这些服务区的经理都是谁?",  # 这个问题依赖于前面的上下文
-                "session_id": self.test_session_id,
-                "conversation_id": conversation_id
-            }
-            
-            response2 = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload2,
-                timeout=30
-            )
-            
-            print(f"[TEST] 第二次调用响应码: {response2.status_code}")
-            
-            if response2.status_code == 200:
-                data2 = response2.json()
-                print(f"[TEST] 使用了上下文: {data2['data'].get('context_used', False)}")
-                self.assertTrue(data2['data'].get('context_used', False))
-                
-        except Exception as e:
-            self.skipTest(f"上下文测试失败: {str(e)}")
-    
-    def test_cache_hit(self):
-        """测试缓存命中"""
-        try:
-            # 同样的问题问两次
-            question = "高速公路服务区的数量是多少?"
-            
-            # 第一次调用
-            payload = {
-                "question": question,
-                "session_id": self.test_session_id + "_cache_test"
-            }
-            
-            response1 = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload,
-                timeout=30
-            )
-            
-            if response1.status_code != 200:
-                self.skipTest("第一次API调用失败")
-            
-            data1 = response1.json()
-            from_cache1 = data1['data'].get('from_cache', False)
-            print(f"[TEST] 第一次调用from_cache: {from_cache1}")
-            self.assertFalse(from_cache1)
-            
-            # 立即第二次调用相同的问题
-            response2 = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload,
-                timeout=30
-            )
-            
-            if response2.status_code == 200:
-                data2 = response2.json()
-                from_cache2 = data2['data'].get('from_cache', False)
-                print(f"[TEST] 第二次调用from_cache: {from_cache2}")
-                # 注意:由于是新对话,可能不会命中缓存
-                
-        except Exception as e:
-            self.skipTest(f"缓存测试失败: {str(e)}")
-    
-    def test_invalid_conversation_id(self):
-        """测试无效的conversation_id处理"""
-        try:
-            payload = {
-                "question": "测试无效对话ID",
-                "session_id": self.test_session_id,
-                "conversation_id": "invalid_conv_id_xyz"
-            }
-            
-            response = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload,
-                timeout=30
-            )
-            
-            if response.status_code == 200:
-                data = response.json()
-                status = data['data'].get('conversation_status')
-                print(f"[TEST] 无效对话ID的状态: {status}")
-                self.assertEqual(status, 'invalid_id_new')
-                self.assertEqual(
-                    data['data'].get('requested_conversation_id'),
-                    'invalid_conv_id_xyz'
-                )
-                
-        except Exception as e:
-            self.skipTest(f"无效ID测试失败: {str(e)}")
-    
-    def test_conversation_api_endpoints(self):
-        """测试对话管理API端点"""
-        try:
-            # 先创建一个对话
-            result = self.test_basic_ask_agent()
-            if not result:
-                self.skipTest("无法创建测试对话")
-            
-            conversation_id, user_id = result
-            
-            # 测试获取用户对话列表
-            response = requests.get(
-                f"{self.base_url}/user/{user_id}/conversations",
-                timeout=10
-            )
-            
-            print(f"[TEST] 获取对话列表响应码: {response.status_code}")
-            if response.status_code == 200:
-                data = response.json()
-                self.assertIn('data', data)
-                self.assertIn('conversations', data['data'])
-                print(f"[TEST] 用户对话数: {len(data['data']['conversations'])}")
-            
-            # 测试获取对话消息
-            response = requests.get(
-                f"{self.base_url}/conversation/{conversation_id}/messages",
-                timeout=10
-            )
-            
-            print(f"[TEST] 获取对话消息响应码: {response.status_code}")
-            if response.status_code == 200:
-                data = response.json()
-                self.assertIn('data', data)
-                self.assertIn('messages', data['data'])
-                print(f"[TEST] 对话消息数: {len(data['data']['messages'])}")
-            
-            # 测试获取统计信息
-            response = requests.get(
-                f"{self.base_url}/conversation_stats",
-                timeout=10
-            )
-            
-            print(f"[TEST] 获取统计信息响应码: {response.status_code}")
-            if response.status_code == 200:
-                data = response.json()
-                self.assertIn('data', data)
-                stats = data['data']
-                print(f"[TEST] Redis可用: {stats.get('available')}")
-                print(f"[TEST] 总用户数: {stats.get('total_users')}")
-                print(f"[TEST] 总对话数: {stats.get('total_conversations')}")
-                
-        except Exception as e:
-            print(f"[ERROR] 管理API测试失败: {str(e)}")
-    
-    def test_guest_user_generation(self):
-        """测试guest用户生成"""
-        try:
-            # 不提供user_id,应该生成guest用户
-            payload = {
-                "question": "测试guest用户",
-                "session_id": self.test_session_id + "_guest"
-            }
-            
-            response = requests.post(
-                f"{self.base_url}/ask_agent",
-                json=payload,
-                timeout=30
-            )
-            
-            if response.status_code == 200:
-                data = response.json()
-                user_id = data['data']['user_id']
-                is_guest = user_id == "guest"  # 直接通过user_id判断
-                
-                print(f"[TEST] 生成的用户ID: {user_id}")
-                print(f"[TEST] 是否为guest用户: {is_guest}")
-                
-                self.assertTrue(is_guest, f"期望guest用户,实际得到: {user_id}")
-                
-        except Exception as e:
-            self.skipTest(f"Guest用户测试失败: {str(e)}")
-
-
-def run_selected_tests():
-    """运行选定的测试"""
-    suite = unittest.TestSuite()
-    
-    # 添加要运行的测试
-    suite.addTest(TestAskAgentRedisIntegration('test_api_availability'))
-    suite.addTest(TestAskAgentRedisIntegration('test_basic_ask_agent'))
-    suite.addTest(TestAskAgentRedisIntegration('test_conversation_context'))
-    suite.addTest(TestAskAgentRedisIntegration('test_invalid_conversation_id'))
-    suite.addTest(TestAskAgentRedisIntegration('test_conversation_api_endpoints'))
-    
-    runner = unittest.TextTestRunner(verbosity=2)
-    runner.run(suite)
-
-
-if __name__ == '__main__':
-    print("=" * 60)
-    print("ask_agent Redis集成测试")
-    print("注意: 需要先启动Flask应用 (python citu_app.py)")
-    print("=" * 60)
-    
-    # 可以选择运行所有测试或选定的测试
-    unittest.main()
-    # 或者运行选定的测试
-    # run_selected_tests() 
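For reference, the assertions in this file (together with the demo script above) imply roughly the following response envelope from /api/v0/ask_agent; the field names come from the assertions, the values are placeholders:

expected_envelope = {
    "success": True,
    "data": {
        "conversation_id": "conv_1720939200_abcd",  # created or reused conversation (always "conv_" prefixed)
        "user_id": "guest",                         # resolved user: login user > request param > generated guest
        "conversation_status": "new",               # e.g. "new" or "invalid_id_new"
        "requested_conversation_id": None,          # echoed back when an invalid id was supplied
        "context_used": False,                      # True on follow-up questions within the same conversation
        "from_cache": False,                        # True when a cached answer is returned
        "response": "...",                          # answer text (some callers read "response_text" instead)
    },
}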

+ 0 - 106
test/test_config_refactor.py

@@ -1,106 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试配置重构是否成功
-"""
-
-def test_config_refactor():
-    """测试配置重构"""
-    print("=== 配置重构测试 ===")
-    
-    try:
-        import app_config
-        print("✓ app_config 导入成功")
-    except ImportError as e:
-        print(f"✗ app_config 导入失败: {e}")
-        return False
-    
-    # 测试新配置是否存在
-    new_configs = [
-        'API_DEEPSEEK_CONFIG',
-        'API_QWEN_CONFIG', 
-        'OLLAMA_EMBEDDING_CONFIG',
-        'API_LLM_MODEL',
-        'VECTOR_DB_TYPE'
-    ]
-    
-    print("\n--- 新配置检查 ---")
-    for config_name in new_configs:
-        if hasattr(app_config, config_name):
-            print(f"✓ {config_name} 存在")
-        else:
-            print(f"✗ {config_name} 不存在")
-            return False
-    
-    # 测试旧配置是否已删除
-    old_configs = [
-        'DEEPSEEK_CONFIG',
-        'QWEN_CONFIG',
-        'EMBEDDING_OLLAMA_CONFIG',
-        'LLM_MODEL_NAME',
-        'VECTOR_DB_NAME'
-    ]
-    
-    print("\n--- 旧配置检查 ---")
-    for config_name in old_configs:
-        if hasattr(app_config, config_name):
-            print(f"✗ {config_name} 仍然存在(应该已删除)")
-            return False
-        else:
-            print(f"✓ {config_name} 已删除")
-    
-    # 测试utils.py中的函数
-    print("\n--- Utils函数测试 ---")
-    try:
-        from common.utils import get_current_llm_config, get_current_embedding_config
-        
-        # 测试LLM配置
-        llm_config = get_current_llm_config()
-        print(f"✓ get_current_llm_config() 成功,返回类型: {type(llm_config)}")
-        
-        # 测试Embedding配置
-        embedding_config = get_current_embedding_config()
-        print(f"✓ get_current_embedding_config() 成功,返回类型: {type(embedding_config)}")
-        
-    except Exception as e:
-        print(f"✗ Utils函数测试失败: {e}")
-        return False
-    
-    # 测试配置内容
-    print("\n--- 配置内容验证 ---")
-    try:
-        # 验证API_QWEN_CONFIG
-        qwen_config = app_config.API_QWEN_CONFIG
-        if 'model' in qwen_config and 'api_key' in qwen_config:
-            print("✓ API_QWEN_CONFIG 结构正确")
-        else:
-            print("✗ API_QWEN_CONFIG 结构不正确")
-            return False
-            
-        # 验证API_DEEPSEEK_CONFIG
-        deepseek_config = app_config.API_DEEPSEEK_CONFIG
-        if 'model' in deepseek_config and 'api_key' in deepseek_config:
-            print("✓ API_DEEPSEEK_CONFIG 结构正确")
-        else:
-            print("✗ API_DEEPSEEK_CONFIG 结构不正确")
-            return False
-            
-        # 验证OLLAMA_EMBEDDING_CONFIG
-        ollama_embedding_config = app_config.OLLAMA_EMBEDDING_CONFIG
-        if 'model_name' in ollama_embedding_config and 'base_url' in ollama_embedding_config:
-            print("✓ OLLAMA_EMBEDDING_CONFIG 结构正确")
-        else:
-            print("✗ OLLAMA_EMBEDDING_CONFIG 结构不正确")
-            return False
-            
-    except Exception as e:
-        print(f"✗ 配置内容验证失败: {e}")
-        return False
-    
-    print("\n=== 配置重构测试完成 ===")
-    print("✓ 所有测试通过!配置重构成功!")
-    return True
-
-if __name__ == "__main__":
-    success = test_config_refactor()
-    if not success:
-        exit(1) 
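The structural checks above only require a handful of keys; an app_config that satisfies them looks roughly like this (all values are placeholders, not the project's real configuration):

# Placeholder values only – the real app_config.py is not part of this diff.
API_DEEPSEEK_CONFIG = {"model": "deepseek-chat", "api_key": "sk-..."}
API_QWEN_CONFIG = {"model": "qwen-plus", "api_key": "sk-..."}
OLLAMA_EMBEDDING_CONFIG = {"model_name": "nomic-embed-text", "base_url": "http://localhost:11434"}
API_LLM_MODEL = "qwen"          # only checked for existence by the test
VECTOR_DB_TYPE = "pgvector"     # only checked for existence by the test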

+ 0 - 128
test/test_config_utils.py

@@ -1,128 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试配置工具函数的脚本
-用于验证common/utils.py中的函数是否正常工作
-"""
-
-def test_config_utils():
-    """测试配置工具函数"""
-    try:
-        from common.utils import (
-            get_current_embedding_config,
-            get_current_llm_config,
-            get_current_vector_db_config,
-            get_current_model_info,
-            is_using_ollama_llm,
-            is_using_ollama_embedding,
-            is_using_api_llm,
-            is_using_api_embedding,
-            print_current_config
-        )
-        
-        print("=== 测试配置工具函数 ===")
-        
-        # 测试模型类型检查函数
-        print(f"使用Ollama LLM: {is_using_ollama_llm()}")
-        print(f"使用Ollama Embedding: {is_using_ollama_embedding()}")
-        print(f"使用API LLM: {is_using_api_llm()}")
-        print(f"使用API Embedding: {is_using_api_embedding()}")
-        print()
-        
-        # 测试配置获取函数
-        print("=== LLM配置 ===")
-        llm_config = get_current_llm_config()
-        for key, value in llm_config.items():
-            if key == "api_key" and value:
-                print(f"{key}: {'*' * 8}...{value[-4:]}")  # 隐藏API密钥
-            else:
-                print(f"{key}: {value}")
-        print()
-        
-        print("=== Embedding配置 ===")
-        embedding_config = get_current_embedding_config()
-        for key, value in embedding_config.items():
-            if key == "api_key" and value:
-                print(f"{key}: {'*' * 8}...{value[-4:]}")  # 隐藏API密钥
-            else:
-                print(f"{key}: {value}")
-        print()
-        
-        print("=== 向量数据库配置 ===")
-        vector_db_config = get_current_vector_db_config()
-        for key, value in vector_db_config.items():
-            if key == "password" and value:
-                print(f"{key}: {'*' * 8}")  # 隐藏密码
-            else:
-                print(f"{key}: {value}")
-        print()
-        
-        # 测试模型信息摘要
-        print("=== 模型信息摘要 ===")
-        model_info = get_current_model_info()
-        for key, value in model_info.items():
-            print(f"{key}: {value}")
-        print()
-        
-        # 测试打印配置函数
-        print_current_config()
-        
-        print("✅ 所有配置工具函数测试通过!")
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-
-def test_different_configurations():
-    """测试不同配置组合"""
-    import app_config
-    
-    print("\n=== 测试不同配置组合 ===")
-    
-    # 保存原始配置
-    original_llm_type = app_config.LLM_MODEL_TYPE
-    original_embedding_type = app_config.EMBEDDING_MODEL_TYPE
-    original_llm_name = app_config.LLM_MODEL_NAME
-    
-    try:
-        from common.utils import get_current_model_info, print_current_config
-        
-        # 测试配置1:API LLM + API Embedding
-        print("\n--- 配置1:API LLM + API Embedding ---")
-        app_config.LLM_MODEL_TYPE = "api"
-        app_config.EMBEDDING_MODEL_TYPE = "api"
-        app_config.LLM_MODEL_NAME = "qwen"
-        print_current_config()
-        
-        # 测试配置2:API LLM + Ollama Embedding
-        print("\n--- 配置2:API LLM + Ollama Embedding ---")
-        app_config.LLM_MODEL_TYPE = "api"
-        app_config.EMBEDDING_MODEL_TYPE = "ollama"
-        app_config.LLM_MODEL_NAME = "deepseek"
-        print_current_config()
-        
-        # 测试配置3:Ollama LLM + API Embedding
-        print("\n--- 配置3:Ollama LLM + API Embedding ---")
-        app_config.LLM_MODEL_TYPE = "ollama"
-        app_config.EMBEDDING_MODEL_TYPE = "api"
-        print_current_config()
-        
-        # 测试配置4:Ollama LLM + Ollama Embedding
-        print("\n--- 配置4:Ollama LLM + Ollama Embedding ---")
-        app_config.LLM_MODEL_TYPE = "ollama"
-        app_config.EMBEDDING_MODEL_TYPE = "ollama"
-        print_current_config()
-        
-    except Exception as e:
-        print(f"❌ 配置测试失败: {e}")
-    finally:
-        # 恢复原始配置
-        app_config.LLM_MODEL_TYPE = original_llm_type
-        app_config.EMBEDDING_MODEL_TYPE = original_embedding_type
-        app_config.LLM_MODEL_NAME = original_llm_name
-        print("\n--- 恢复原始配置 ---")
-        print_current_config()
-
-if __name__ == "__main__":
-    test_config_utils()
-    test_different_configurations() 

+ 0 - 225
test/test_ollama_integration.py

@@ -1,225 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试Ollama集成功能的脚本
-用于验证Ollama LLM和Embedding是否正常工作
-"""
-
-def test_ollama_llm():
-    """测试Ollama LLM功能"""
-    print("=== 测试Ollama LLM ===")
-    
-    try:
-        from customollama.ollama_chat import OllamaChat
-        
-        # 测试配置
-        config = {
-            "base_url": "http://localhost:11434",
-            "model": "qwen2.5:7b",
-            "temperature": 0.7,
-            "timeout": 60
-        }
-        
-        # 创建实例
-        ollama_chat = OllamaChat(config=config)
-        
-        # 测试连接
-        print("测试Ollama连接...")
-        test_result = ollama_chat.test_connection()
-        
-        if test_result["success"]:
-            print(f"✅ Ollama LLM连接成功: {test_result['message']}")
-        else:
-            print(f"❌ Ollama LLM连接失败: {test_result['message']}")
-            return False
-            
-        # 测试简单对话
-        print("\n测试简单对话...")
-        response = ollama_chat.chat_with_llm("你好,请简单介绍一下你自己")
-        print(f"LLM响应: {response}")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ Ollama LLM测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_ollama_embedding():
-    """测试Ollama Embedding功能"""
-    print("\n=== 测试Ollama Embedding ===")
-    
-    try:
-        from customollama.ollama_embedding import OllamaEmbeddingFunction
-        
-        # 创建实例
-        embedding_func = OllamaEmbeddingFunction(
-            model_name="nomic-embed-text",
-            base_url="http://localhost:11434",
-            embedding_dimension=768
-        )
-        
-        # 测试连接
-        print("测试Ollama Embedding连接...")
-        test_result = embedding_func.test_connection()
-        
-        if test_result["success"]:
-            print(f"✅ Ollama Embedding连接成功: {test_result['message']}")
-        else:
-            print(f"❌ Ollama Embedding连接失败: {test_result['message']}")
-            return False
-            
-        # 测试生成embedding
-        print("\n测试生成embedding...")
-        test_texts = ["这是一个测试文本", "另一个测试文本"]
-        embeddings = embedding_func(test_texts)
-        
-        print(f"生成了 {len(embeddings)} 个embedding向量")
-        for i, emb in enumerate(embeddings):
-            print(f"文本 {i+1} 的embedding维度: {len(emb)}")
-            
-        return True
-        
-    except Exception as e:
-        print(f"❌ Ollama Embedding测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_ollama_with_config():
-    """测试使用配置文件的Ollama功能"""
-    print("\n=== 测试配置文件中的Ollama设置 ===")
-    
-    try:
-        import app_config
-        from common.utils import print_current_config, is_using_ollama_llm, is_using_ollama_embedding
-        
-        # 保存原始配置
-        original_llm_type = app_config.LLM_MODEL_TYPE
-        original_embedding_type = app_config.EMBEDDING_MODEL_TYPE
-        
-        try:
-            # 设置为Ollama模式
-            app_config.LLM_MODEL_TYPE = "ollama"
-            app_config.EMBEDDING_MODEL_TYPE = "ollama"
-            
-            print("当前配置:")
-            print_current_config()
-            
-            print(f"\n使用Ollama LLM: {is_using_ollama_llm()}")
-            print(f"使用Ollama Embedding: {is_using_ollama_embedding()}")
-            
-            # 测试embedding函数
-            print("\n测试通过配置获取embedding函数...")
-            from embedding_function import get_embedding_function
-            
-            embedding_func = get_embedding_function()
-            print(f"成功创建embedding函数: {type(embedding_func).__name__}")
-            
-            # 测试工厂函数(如果Ollama服务可用的话)
-            print("\n测试工厂函数...")
-            try:
-                from vanna_llm_factory import create_vanna_instance
-                vn = create_vanna_instance()
-                print(f"✅ 成功创建Vanna实例: {type(vn).__name__}")
-                return True
-            except Exception as e:
-                print(f"⚠️  工厂函数测试失败(可能是Ollama服务未启动): {e}")
-                return True  # 这不算失败,只是服务未启动
-                
-        finally:
-            # 恢复原始配置
-            app_config.LLM_MODEL_TYPE = original_llm_type
-            app_config.EMBEDDING_MODEL_TYPE = original_embedding_type
-            
-    except Exception as e:
-        print(f"❌ 配置测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_mixed_configurations():
-    """测试混合配置(API + Ollama)"""
-    print("\n=== 测试混合配置 ===")
-    
-    try:
-        import app_config
-        from common.utils import print_current_config
-        
-        # 保存原始配置
-        original_llm_type = app_config.LLM_MODEL_TYPE
-        original_embedding_type = app_config.EMBEDDING_MODEL_TYPE
-        
-        try:
-            # 测试配置1:API LLM + Ollama Embedding
-            print("\n--- 测试: API LLM + Ollama Embedding ---")
-            app_config.LLM_MODEL_TYPE = "api"
-            app_config.EMBEDDING_MODEL_TYPE = "ollama"
-            print_current_config()
-            
-            from embedding_function import get_embedding_function
-            embedding_func = get_embedding_function()
-            print(f"Embedding函数类型: {type(embedding_func).__name__}")
-            
-            # 测试配置2:Ollama LLM + API Embedding
-            print("\n--- 测试: Ollama LLM + API Embedding ---")
-            app_config.LLM_MODEL_TYPE = "ollama"
-            app_config.EMBEDDING_MODEL_TYPE = "api"
-            print_current_config()
-            
-            embedding_func = get_embedding_function()
-            print(f"Embedding函数类型: {type(embedding_func).__name__}")
-            
-            print("✅ 混合配置测试通过")
-            return True
-            
-        finally:
-            # 恢复原始配置
-            app_config.LLM_MODEL_TYPE = original_llm_type
-            app_config.EMBEDDING_MODEL_TYPE = original_embedding_type
-            
-    except Exception as e:
-        print(f"❌ 混合配置测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def main():
-    """主测试函数"""
-    print("开始测试Ollama集成功能...")
-    print("注意: 这些测试需要Ollama服务运行在 http://localhost:11434")
-    print("=" * 60)
-    
-    results = []
-    
-    # 测试配置和工具函数(不需要Ollama服务)
-    results.append(("配置文件测试", test_ollama_with_config()))
-    results.append(("混合配置测试", test_mixed_configurations()))
-    
-    # 测试实际的Ollama功能(需要Ollama服务)
-    print(f"\n{'='*60}")
-    print("以下测试需要Ollama服务运行,如果失败可能是服务未启动")
-    print("=" * 60)
-    
-    results.append(("Ollama LLM", test_ollama_llm()))
-    results.append(("Ollama Embedding", test_ollama_embedding()))
-    
-    # 总结
-    print(f"\n{'='*60}")
-    print("测试结果总结:")
-    print("=" * 60)
-    
-    for test_name, success in results:
-        status = "✅ 通过" if success else "❌ 失败"
-        print(f"{test_name}: {status}")
-    
-    total_passed = sum(1 for _, success in results if success)
-    print(f"\n总计: {total_passed}/{len(results)} 个测试通过")
-    
-    if total_passed == len(results):
-        print("🎉 所有测试都通过了!Ollama集成功能正常。")
-    else:
-        print("⚠️  部分测试失败,请检查Ollama服务是否正常运行。")
-
-if __name__ == "__main__":
-    main() 
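Both Ollama tests assume a local service at http://localhost:11434. A quick availability probe before running them might look like this (a sketch using Ollama's model-listing endpoint):

import requests

def ollama_available(base_url: str = "http://localhost:11434") -> bool:
    """Return True if an Ollama server responds on base_url."""
    try:
        resp = requests.get(f"{base_url}/api/tags", timeout=3)  # lists locally installed models
        return resp.status_code == 200
    except requests.RequestException:
        return False

If this returns False, the LLM and Embedding tests above are expected to fail with connection errors rather than assertion errors.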

+ 0 - 283
test/test_redis_conversation_manager.py

@@ -1,283 +0,0 @@
-import unittest
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-from common.redis_conversation_manager import RedisConversationManager
-from datetime import datetime
-import time
-
-class TestRedisConversationManager(unittest.TestCase):
-    """Redis对话管理器单元测试"""
-    
-    def setUp(self):
-        """测试前准备"""
-        self.manager = RedisConversationManager()
-        # 清理测试数据
-        self.test_user_id = "test_user_123"
-        self.test_guest_id = "guest_test_456"
-        
-    def tearDown(self):
-        """测试后清理"""
-        # 清理测试创建的数据
-        if self.manager.is_available():
-            # 清理测试用户的对话
-            try:
-                conversations = self.manager.get_conversations(self.test_user_id)
-                for conv in conversations:
-                    conv_id = conv.get('conversation_id')
-                    if conv_id:
-                        self.manager.redis_client.delete(f"conversation:{conv_id}:meta")
-                        self.manager.redis_client.delete(f"conversation:{conv_id}:messages")
-                self.manager.redis_client.delete(f"user:{self.test_user_id}:conversations")
-                
-                # 清理guest用户
-                conversations = self.manager.get_conversations(self.test_guest_id)
-                for conv in conversations:
-                    conv_id = conv.get('conversation_id')
-                    if conv_id:
-                        self.manager.redis_client.delete(f"conversation:{conv_id}:meta")
-                        self.manager.redis_client.delete(f"conversation:{conv_id}:messages")
-                self.manager.redis_client.delete(f"user:{self.test_guest_id}:conversations")
-            except:
-                pass
-    
-    def test_redis_connection(self):
-        """测试Redis连接"""
-        is_available = self.manager.is_available()
-        print(f"[TEST] Redis可用状态: {is_available}")
-        if not is_available:
-            self.skipTest("Redis不可用,跳过测试")
-    
-    def test_user_id_resolution(self):
-        """测试用户ID解析逻辑"""
-        # 测试登录用户ID优先
-        user_id = self.manager.resolve_user_id(
-            "request_user", "session_123", "127.0.0.1", "login_user"
-        )
-        self.assertEqual(user_id, "login_user")
-        
-        # 测试请求参数用户ID
-        user_id = self.manager.resolve_user_id(
-            "request_user", "session_123", "127.0.0.1", None
-        )
-        self.assertEqual(user_id, "request_user")
-        
-        # 测试guest用户生成
-        user_id = self.manager.resolve_user_id(
-            None, "session_123", "127.0.0.1", None
-        )
-        self.assertTrue(user_id.startswith("guest_"))
-        
-        # 测试基于IP的临时guest
-        user_id = self.manager.resolve_user_id(
-            None, None, "127.0.0.1", None
-        )
-        self.assertTrue(user_id.startswith("guest_temp_"))
-    
-    def test_conversation_creation(self):
-        """测试对话创建"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        conv_id = self.manager.create_conversation(self.test_user_id)
-        print(f"[TEST] 创建的对话ID: {conv_id}")
-        
-        # 验证对话ID格式
-        self.assertTrue(conv_id.startswith("conv_"))
-        self.assertIn("_", conv_id)
-        
-        # 验证对话元信息
-        meta = self.manager.get_conversation_meta(conv_id)
-        self.assertEqual(meta.get('user_id'), self.test_user_id)
-        self.assertEqual(meta.get('conversation_id'), conv_id)
-        self.assertIn('created_at', meta)
-    
-    def test_message_saving_and_retrieval(self):
-        """测试消息保存和获取"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建对话
-        conv_id = self.manager.create_conversation(self.test_user_id)
-        
-        # 保存消息
-        self.manager.save_message(conv_id, "user", "测试问题")
-        self.manager.save_message(conv_id, "assistant", "测试回答")
-        
-        # 获取消息列表
-        messages = self.manager.get_conversation_messages(conv_id)
-        self.assertEqual(len(messages), 2)
-        
-        # 验证消息顺序(时间正序)
-        self.assertEqual(messages[0]['role'], 'user')
-        self.assertEqual(messages[0]['content'], '测试问题')
-        self.assertEqual(messages[1]['role'], 'assistant')
-        self.assertEqual(messages[1]['content'], '测试回答')
-    
-    def test_context_generation(self):
-        """测试上下文生成"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建对话并添加多条消息
-        conv_id = self.manager.create_conversation(self.test_user_id)
-        
-        self.manager.save_message(conv_id, "user", "问题1")
-        self.manager.save_message(conv_id, "assistant", "回答1")
-        self.manager.save_message(conv_id, "user", "问题2")
-        self.manager.save_message(conv_id, "assistant", "回答2")
-        
-        # 获取上下文
-        context = self.manager.get_context(conv_id, count=2)
-        print(f"[TEST] 生成的上下文:\n{context}")
-        
-        # 验证上下文格式
-        self.assertIn("用户: 问题1", context)
-        self.assertIn("助手: 回答1", context)
-        self.assertIn("用户: 问题2", context)
-        self.assertIn("助手: 回答2", context)
-    
-    def test_conversation_list(self):
-        """测试用户对话列表"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建多个对话
-        conv_ids = []
-        for i in range(3):
-            conv_id = self.manager.create_conversation(self.test_user_id)
-            conv_ids.append(conv_id)
-            time.sleep(0.1)  # 确保时间戳不同
-        
-        # 获取对话列表
-        conversations = self.manager.get_conversations(self.test_user_id)
-        self.assertEqual(len(conversations), 3)
-        
-        # 验证顺序(最新的在前)
-        self.assertEqual(conversations[0]['conversation_id'], conv_ids[2])
-        self.assertEqual(conversations[1]['conversation_id'], conv_ids[1])
-        self.assertEqual(conversations[2]['conversation_id'], conv_ids[0])
-    
-    def test_cache_functionality(self):
-        """测试缓存功能"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        question = "测试缓存问题"
-        context = "用户: 之前的问题\n助手: 之前的回答"
-        
-        # 测试缓存未命中
-        cached = self.manager.get_cached_answer(question, context)
-        self.assertIsNone(cached)
-        
-        # 缓存答案
-        answer = {
-            "success": True,
-            "data": {
-                "response": "测试答案",
-                "type": "CHAT"
-            }
-        }
-        self.manager.cache_answer(question, answer, context)
-        
-        # 测试缓存命中
-        cached = self.manager.get_cached_answer(question, context)
-        self.assertIsNotNone(cached)
-        self.assertEqual(cached['data']['response'], '测试答案')
-        
-        # 测试不同上下文的缓存
-        different_context = "用户: 不同的问题\n助手: 不同的回答"
-        cached = self.manager.get_cached_answer(question, different_context)
-        self.assertIsNone(cached)  # 不同上下文应该缓存未命中
-    
-    def test_conversation_id_resolution(self):
-        """测试对话ID解析"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 测试创建新对话
-        conv_id, status = self.manager.resolve_conversation_id(
-            self.test_user_id, None, False
-        )
-        self.assertTrue(conv_id.startswith("conv_"))
-        self.assertEqual(status['status'], 'new')
-        
-        # 测试使用已存在的对话
-        conv_id2, status2 = self.manager.resolve_conversation_id(
-            self.test_user_id, conv_id, False
-        )
-        self.assertEqual(conv_id2, conv_id)
-        self.assertEqual(status2['status'], 'existing')
-        
-        # 测试无效的对话ID
-        conv_id3, status3 = self.manager.resolve_conversation_id(
-            self.test_user_id, "invalid_conv_id", False
-        )
-        self.assertNotEqual(conv_id3, "invalid_conv_id")
-        self.assertEqual(status3['status'], 'invalid_id_new')
-        self.assertEqual(status3['requested_id'], 'invalid_conv_id')
-    
-    def test_statistics(self):
-        """测试统计功能"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建测试数据
-        conv_id = self.manager.create_conversation(self.test_user_id)
-        self.manager.save_message(conv_id, "user", "统计测试")
-        
-        # 获取统计信息
-        stats = self.manager.get_stats()
-        print(f"[TEST] 统计信息: {stats}")
-        
-        self.assertTrue(stats['available'])
-        self.assertIn('total_users', stats)
-        self.assertIn('total_conversations', stats)
-        self.assertIn('cached_qa_count', stats)
-        
-    def test_guest_user_limit(self):
-        """测试guest用户对话数量限制"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建多个对话,超过guest用户限制
-        from app_config import MAX_GUEST_CONVERSATIONS
-        
-        conv_ids = []
-        for i in range(MAX_GUEST_CONVERSATIONS + 2):
-            conv_id = self.manager.create_conversation(self.test_guest_id)
-            conv_ids.append(conv_id)
-            time.sleep(0.05)
-        
-        # 验证只保留了限制数量的对话
-        conversations = self.manager.get_conversations(self.test_guest_id)
-        self.assertEqual(len(conversations), MAX_GUEST_CONVERSATIONS)
-        
-        # 验证保留的是最新的对话
-        retained_ids = [conv['conversation_id'] for conv in conversations]
-        for i in range(MAX_GUEST_CONVERSATIONS):
-            self.assertIn(conv_ids[-(i+1)], retained_ids)
-    
-    def test_cleanup_functionality(self):
-        """测试清理功能"""
-        if not self.manager.is_available():
-            self.skipTest("Redis不可用")
-        
-        # 创建对话
-        conv_id = self.manager.create_conversation(self.test_user_id)
-        
-        # 手动删除对话元信息,模拟过期
-        self.manager.redis_client.delete(f"conversation:{conv_id}:meta")
-        
-        # 执行清理
-        self.manager.cleanup_expired_conversations()
-        
-        # 验证对话已从用户列表中移除
-        conversations = self.manager.get_conversations(self.test_user_id)
-        conv_ids = [conv['conversation_id'] for conv in conversations]
-        self.assertNotIn(conv_id, conv_ids)
-
-
-if __name__ == '__main__':
-    unittest.main() 

+ 0 - 111
test/test_redis_fix_validation.py

@@ -1,111 +0,0 @@
-"""
-Redis集成修复验证测试
-
-这个脚本用于快速验证Redis集成的修复是否有效
-"""
-
-import requests
-import json
-import time
-
-def test_ask_agent_basic():
-    """测试基本的ask_agent功能"""
-    base_url = "http://localhost:8084/api/v0"
-    
-    print("=== Redis集成修复验证测试 ===\n")
-    
-    # 测试1:第一次请求(应该成功)
-    print("1. 测试第一次请求...")
-    print("   (注意:第一次请求可能需要较长时间,请耐心等待...)")
-    response1 = requests.post(
-        f"{base_url}/ask_agent",
-        json={"question": "服务区有多少个?"},
-        timeout=120  # 增加到120秒,适应较慢的响应
-    )
-    
-    print(f"   状态码: {response1.status_code}")
-    result1 = response1.json()
-    print(f"   成功: {result1.get('success')}")
-    print(f"   消息: {result1.get('message')}")
-    
-    if result1.get('success'):
-        data = result1.get('data', {})
-        print(f"   响应类型: {data.get('type')}")
-        print(f"   响应文本: {data.get('response_text', '')[:50]}...")
-        print(f"   是否缓存: {data.get('from_cache', False)}")
-        print(f"   对话ID: {data.get('conversation_id')}")
-    else:
-        print(f"   错误: {json.dumps(result1, indent=2, ensure_ascii=False)}")
-    
-    # 等待一下
-    time.sleep(1)
-    
-    # 测试2:第二次相同请求(应该使用缓存)
-    print("\n2. 测试第二次请求(相同问题,应该使用缓存)...")
-    response2 = requests.post(
-        f"{base_url}/ask_agent",
-        json={"question": "服务区有多少个?"},
-        timeout=60  # 也增加超时时间,虽然缓存应该更快
-    )
-    
-    print(f"   状态码: {response2.status_code}")
-    result2 = response2.json()
-    print(f"   成功: {result2.get('success')}")
-    
-    if result2.get('success'):
-        data = result2.get('data', {})
-        print(f"   是否缓存: {data.get('from_cache', False)}")
-        print(f"   响应文本: {data.get('response_text', '')[:50]}...")
-        
-        # 验证缓存功能
-        if data.get('from_cache'):
-            print("\n✅ 缓存功能正常工作!")
-        else:
-            print("\n⚠️ 缓存功能可能有问题,第二次请求没有使用缓存")
-    else:
-        print(f"   错误: {json.dumps(result2, indent=2, ensure_ascii=False)}")
-        print("\n❌ 第二次请求失败,可能是缓存格式问题")
-    
-    # 测试3:测试对话管理API
-    print("\n3. 测试对话管理API...")
-    try:
-        stats_response = requests.get(f"{base_url}/conversation_stats", timeout=5)
-        if stats_response.status_code == 200:
-            stats = stats_response.json()
-            if stats.get('success'):
-                print("   ✅ 对话统计API正常")
-                print(f"   总对话数: {stats.get('data', {}).get('total_conversations', 0)}")
-                print(f"   总用户数: {stats.get('data', {}).get('total_users', 0)}")
-            else:
-                print("   ⚠️ 对话统计API返回失败")
-        else:
-            print(f"   ❌ 对话统计API错误: {stats_response.status_code}")
-    except Exception as e:
-        print(f"   ❌ 对话统计API异常: {str(e)}")
-    
-    print("\n=== 测试完成 ===")
-    
-    # 返回测试结果
-    return {
-        "first_request_success": result1.get('success', False),
-        "second_request_success": result2.get('success', False),
-        "cache_working": result2.get('data', {}).get('from_cache', False) if result2.get('success') else False
-    }
-
-if __name__ == "__main__":
-    try:
-        results = test_ask_agent_basic()
-        
-        print("\n测试结果汇总:")
-        print(f"- 第一次请求: {'✅ 成功' if results['first_request_success'] else '❌ 失败'}")
-        print(f"- 第二次请求: {'✅ 成功' if results['second_request_success'] else '❌ 失败'}")
-        print(f"- 缓存功能: {'✅ 正常' if results['cache_working'] else '❌ 异常'}")
-        
-        if all(results.values()):
-            print("\n🎉 所有测试通过!Redis集成修复成功!")
-        else:
-            print("\n❗ 部分测试失败,请检查日志")
-            
-    except Exception as e:
-        print(f"\n❌ 测试异常: {str(e)}")
-        print("请确保Flask服务正在运行 (python citu_app.py)") 

+ 0 - 94
test/test_routing_modes.py

@@ -1,94 +0,0 @@
-# test_routing_modes.py - 测试不同路由模式的功能
-
-import sys
-import os
-# 添加项目根目录到sys.path,以便导入app_config.py
-sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-def test_routing_modes():
-    """测试不同路由模式的配置和分类器行为"""
-    
-    print("=== 路由模式测试 ===")
-    
-    # 1. 测试配置参数
-    try:
-        import app_config
-        print(f"✓ 配置导入成功")
-        print(f"当前路由模式: {getattr(app_config, 'QUESTION_ROUTING_MODE', '未找到')}")
-    except ImportError as e:
-        print(f"✗ 配置导入失败: {e}")
-        return False
-    
-    # 2. 测试分类器
-    try:
-        from agent.classifier import QuestionClassifier, ClassificationResult
-        classifier = QuestionClassifier()
-        print(f"✓ 分类器创建成功")
-        
-        # 测试问题
-        test_questions = [
-            "查询本月服务区营业额",
-            "你好,请介绍一下平台功能",
-            "请问负责每个服务区的经理的名字是什么?"
-        ]
-        
-        # 临时修改路由模式进行测试
-        original_mode = getattr(app_config, 'QUESTION_ROUTING_MODE', 'hybrid')
-        
-        for mode in ["hybrid", "llm_only", "database_direct", "chat_direct"]:
-            print(f"\n--- 测试路由模式: {mode} ---")
-            app_config.QUESTION_ROUTING_MODE = mode
-            
-            for question in test_questions:
-                try:
-                    result = classifier.classify(question)
-                    print(f"问题: {question}")
-                    print(f"  分类: {result.question_type}")
-                    print(f"  置信度: {result.confidence}")
-                    print(f"  方法: {result.method}")
-                    print(f"  理由: {result.reason[:50]}...")
-                except Exception as e:
-                    print(f"  分类异常: {e}")
-        
-        # 恢复原始配置
-        app_config.QUESTION_ROUTING_MODE = original_mode
-        print(f"\n✓ 分类器测试完成")
-        
-    except ImportError as e:
-        print(f"✗ 分类器导入失败: {e}")
-        return False
-    except Exception as e:
-        print(f"✗ 分类器测试异常: {e}")
-        return False
-    
-    # 3. 测试Agent状态
-    try:
-        from agent.state import AgentState
-        print(f"✓ Agent状态定义正确")
-    except ImportError as e:
-        print(f"✗ Agent状态导入失败: {e}")
-        return False
-    
-    # 4. 测试Agent工作流创建(基础测试,不实际运行)
-    try:
-        from agent.citu_agent import CituLangGraphAgent
-        print(f"✓ Agent类导入成功")
-        
-        # 注意:这里只测试导入,不实际创建Agent实例
-        # 因为可能涉及LLM连接等复杂依赖
-        
-    except ImportError as e:
-        print(f"✗ Agent类导入失败: {e}")
-        return False
-    except Exception as e:
-        print(f"警告: Agent相关模块可能有依赖问题: {e}")
-    
-    print(f"\n=== 路由模式测试完成 ===")
-    return True
-
-if __name__ == "__main__":
-    success = test_routing_modes()
-    if success:
-        print("✓ 所有测试通过!路由模式功能实现成功!")
-    else:
-        print("✗ 测试失败,请检查实现。")

+ 0 - 146
test/test_thinking_control.py

@@ -1,146 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试thinking内容控制功能
-验证DISPLAY_RESULT_THINKING参数是否正确控制thinking内容的显示/隐藏
-"""
-
-import sys
-import os
-
-# 添加项目根目录到Python路径
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-def test_thinking_removal():
-    """测试thinking内容移除功能"""
-    from customllm.base_llm_chat import BaseLLMChat
-    
-    # 创建一个测试类来测试_remove_thinking_content方法
-    class TestLLM(BaseLLMChat):
-        def submit_prompt(self, prompt, **kwargs):
-            return "测试响应"
-    
-    # 创建测试实例
-    test_llm = TestLLM(config={})
-    
-    # 测试用例
-    test_cases = [
-        # 基本thinking标签
-        {
-            "input": "<think>这是思考内容</think>这是最终答案",
-            "expected": "这是最终答案"
-        },
-        # 多行thinking标签
-        {
-            "input": "<think>\n这是多行\n思考内容\n</think>\n\n这是最终答案",
-            "expected": "这是最终答案"
-        },
-        # 大小写不敏感
-        {
-            "input": "<THINK>大写思考</THINK>最终答案",
-            "expected": "最终答案"
-        },
-        # 多个thinking标签
-        {
-            "input": "<think>第一段思考</think>中间内容<think>第二段思考</think>最终答案",
-            "expected": "中间内容最终答案"
-        },
-        # 没有thinking标签
-        {
-            "input": "这是没有thinking标签的普通文本",
-            "expected": "这是没有thinking标签的普通文本"
-        },
-        # 空文本
-        {
-            "input": "",
-            "expected": ""
-        },
-        # None输入
-        {
-            "input": None,
-            "expected": None
-        }
-    ]
-    
-    print("=== 测试thinking内容移除功能 ===")
-    
-    for i, test_case in enumerate(test_cases, 1):
-        input_text = test_case["input"]
-        expected = test_case["expected"]
-        
-        result = test_llm._remove_thinking_content(input_text)
-        
-        if result == expected:
-            print(f"✅ 测试用例 {i}: 通过")
-        else:
-            print(f"❌ 测试用例 {i}: 失败")
-            print(f"   输入: {repr(input_text)}")
-            print(f"   期望: {repr(expected)}")
-            print(f"   实际: {repr(result)}")
-    
-    print()
-
-def test_config_integration():
-    """测试配置集成"""
-    print("=== 测试配置集成 ===")
-    
-    try:
-        from app_config import DISPLAY_RESULT_THINKING
-        print(f"✅ 成功导入配置: DISPLAY_RESULT_THINKING = {DISPLAY_RESULT_THINKING}")
-        
-        from customllm.base_llm_chat import BaseLLMChat
-        print("✅ 成功导入BaseLLMChat类")
-        
-        # 检查类中是否正确导入了配置
-        import customllm.base_llm_chat as base_module
-        if hasattr(base_module, 'DISPLAY_RESULT_THINKING'):
-            print(f"✅ BaseLLMChat模块中的配置: DISPLAY_RESULT_THINKING = {base_module.DISPLAY_RESULT_THINKING}")
-        else:
-            print("❌ BaseLLMChat模块中未找到DISPLAY_RESULT_THINKING配置")
-            
-    except ImportError as e:
-        print(f"❌ 导入失败: {e}")
-    
-    print()
-
-def test_vanna_instance():
-    """测试Vanna实例的thinking处理"""
-    print("=== 测试Vanna实例thinking处理 ===")
-    
-    try:
-        from common.vanna_instance import get_vanna_instance
-        vn = get_vanna_instance()
-        
-        print(f"✅ 成功获取Vanna实例: {type(vn).__name__}")
-        
-        # 检查实例是否有_remove_thinking_content方法
-        if hasattr(vn, '_remove_thinking_content'):
-            print("✅ Vanna实例具有_remove_thinking_content方法")
-            
-            # 测试方法
-            test_text = "<think>测试思考</think>测试结果"
-            cleaned = vn._remove_thinking_content(test_text)
-            if cleaned == "测试结果":
-                print("✅ thinking内容移除功能正常工作")
-            else:
-                print(f"❌ thinking内容移除异常: {repr(cleaned)}")
-        else:
-            print("❌ Vanna实例缺少_remove_thinking_content方法")
-            
-    except Exception as e:
-        print(f"❌ 测试Vanna实例失败: {e}")
-    
-    print()
-
-def main():
-    """主测试函数"""
-    print("开始测试thinking内容控制功能...\n")
-    
-    # 运行所有测试
-    test_thinking_removal()
-    test_config_integration()
-    test_vanna_instance()
-    
-    print("测试完成!")
-
-if __name__ == "__main__":
-    main() 

+ 0 - 294
test/test_training_integration.py

@@ -1,294 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试training目录的代码集成
-验证训练相关的模块是否能正常工作
-"""
-
-def test_training_imports():
-    """测试训练模块的导入"""
-    print("=== 测试训练模块导入 ===")
-    
-    try:
-        # 测试从training包导入
-        from training import (
-            train_ddl,
-            train_documentation,
-            train_sql_example,
-            train_question_sql_pair,
-            flush_training,
-            shutdown_trainer
-        )
-        print("✅ 成功从training包导入所有函数")
-        
-        # 测试直接导入
-        from training.vanna_trainer import BatchProcessor
-        print("✅ 成功导入BatchProcessor类")
-        
-        return True
-        
-    except ImportError as e:
-        print(f"❌ 导入失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_config_access():
-    """测试配置访问"""
-    print("\n=== 测试配置访问 ===")
-    
-    try:
-        import app_config
-        
-        # 测试训练批处理配置
-        batch_enabled = getattr(app_config, 'TRAINING_BATCH_PROCESSING_ENABLED', None)
-        batch_size = getattr(app_config, 'TRAINING_BATCH_SIZE', None)
-        max_workers = getattr(app_config, 'TRAINING_MAX_WORKERS', None)
-        
-        print(f"批处理启用: {batch_enabled}")
-        print(f"批处理大小: {batch_size}")
-        print(f"最大工作线程: {max_workers}")
-        
-        if batch_enabled is not None and batch_size is not None and max_workers is not None:
-            print("✅ 训练批处理配置正常")
-        else:
-            print("⚠️  部分训练批处理配置缺失")
-        
-        # 测试向量数据库配置
-        vector_db_name = getattr(app_config, 'VECTOR_DB_NAME', None)
-        print(f"向量数据库类型: {vector_db_name}")
-        
-        if vector_db_name == "pgvector":
-            pgvector_config = getattr(app_config, 'PGVECTOR_CONFIG', None)
-            if pgvector_config:
-                print("✅ PgVector配置存在")
-            else:
-                print("❌ PgVector配置缺失")
-        
-        # 测试新的配置工具函数
-        try:
-            from common.utils import get_current_embedding_config, get_current_model_info
-            
-            embedding_config = get_current_embedding_config()
-            model_info = get_current_model_info()
-            
-            print(f"当前embedding类型: {model_info['embedding_type']}")
-            print(f"当前embedding模型: {model_info['embedding_model']}")
-            print("✅ 新配置工具函数正常工作")
-            
-        except Exception as e:
-            print(f"⚠️  新配置工具函数测试失败: {e}")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 配置访问测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_vanna_instance_creation():
-    """测试Vanna实例创建"""
-    print("\n=== 测试Vanna实例创建 ===")
-    
-    try:
-        from vanna_llm_factory import create_vanna_instance
-        
-        print("尝试创建Vanna实例...")
-        vn = create_vanna_instance()
-        
-        print(f"✅ 成功创建Vanna实例: {type(vn).__name__}")
-        
-        # 测试基本方法是否存在
-        required_methods = ['train', 'generate_question', 'get_training_data']
-        for method in required_methods:
-            if hasattr(vn, method):
-                print(f"✅ 方法 {method} 存在")
-            else:
-                print(f"⚠️  方法 {method} 不存在")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ Vanna实例创建失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_batch_processor():
-    """测试批处理器"""
-    print("\n=== 测试批处理器 ===")
-    
-    try:
-        from training.vanna_trainer import BatchProcessor
-        import app_config
-        
-        # 创建测试批处理器
-        batch_size = getattr(app_config, 'TRAINING_BATCH_SIZE', 5)
-        max_workers = getattr(app_config, 'TRAINING_MAX_WORKERS', 2)
-        
-        processor = BatchProcessor(batch_size=batch_size, max_workers=max_workers)
-        print(f"✅ 成功创建BatchProcessor实例")
-        print(f"   批处理大小: {processor.batch_size}")
-        print(f"   最大工作线程: {processor.max_workers}")
-        print(f"   批处理启用: {processor.batch_enabled}")
-        
-        # 测试关闭
-        processor.shutdown()
-        print("✅ 批处理器关闭成功")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 批处理器测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_training_functions():
-    """测试训练函数(不实际训练)"""
-    print("\n=== 测试训练函数 ===")
-    
-    try:
-        from training import (
-            train_ddl,
-            train_documentation,
-            train_sql_example,
-            train_question_sql_pair,
-            flush_training,
-            shutdown_trainer
-        )
-        
-        print("✅ 所有训练函数导入成功")
-        
-        # 测试函数是否可调用
-        functions_to_test = [
-            ('train_ddl', train_ddl),
-            ('train_documentation', train_documentation),
-            ('train_sql_example', train_sql_example),
-            ('train_question_sql_pair', train_question_sql_pair),
-            ('flush_training', flush_training),
-            ('shutdown_trainer', shutdown_trainer)
-        ]
-        
-        for func_name, func in functions_to_test:
-            if callable(func):
-                print(f"✅ {func_name} 是可调用的")
-            else:
-                print(f"❌ {func_name} 不可调用")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 训练函数测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_embedding_connection():
-    """测试embedding连接"""
-    print("\n=== 测试Embedding连接 ===")
-    
-    try:
-        from embedding_function import test_embedding_connection
-        
-        print("测试embedding模型连接...")
-        result = test_embedding_connection()
-        
-        if result["success"]:
-            print(f"✅ Embedding连接成功: {result['message']}")
-        else:
-            print(f"⚠️  Embedding连接失败: {result['message']}")
-            print("   这可能是因为API服务未启动或配置不正确")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ Embedding连接测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_run_training_script():
-    """测试run_training.py脚本的基本功能"""
-    print("\n=== 测试run_training.py脚本 ===")
-    
-    try:
-        # 导入run_training模块
-        import sys
-        import os
-        
-        # 添加项目根目录到路径,以便导入 training 包(本文件位于 test/ 目录)
-        project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-        if project_root not in sys.path:
-            sys.path.insert(0, project_root)
-        
-        # 导入run_training模块的函数
-        from training.run_training import (
-            read_file_by_delimiter,
-            read_markdown_file_by_sections,
-            check_pgvector_connection
-        )
-        
-        print("✅ 成功导入run_training模块的函数")
-        
-        # 测试文件读取函数
-        test_content = "section1---section2---section3"
-        with open("test_temp.txt", "w", encoding="utf-8") as f:
-            f.write(test_content)
-        
-        try:
-            sections = read_file_by_delimiter("test_temp.txt", "---")
-            if len(sections) == 3:
-                print("✅ read_file_by_delimiter 函数正常工作")
-            else:
-                print(f"⚠️  read_file_by_delimiter 返回了 {len(sections)} 个部分,期望 3 个")
-        finally:
-            if os.path.exists("test_temp.txt"):
-                os.remove("test_temp.txt")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ run_training.py脚本测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def main():
-    """主测试函数"""
-    print("开始测试training目录的代码集成...")
-    print("=" * 60)
-    
-    results = []
-    
-    # 运行所有测试
-    results.append(("训练模块导入", test_training_imports()))
-    results.append(("配置访问", test_config_access()))
-    results.append(("Vanna实例创建", test_vanna_instance_creation()))
-    results.append(("批处理器", test_batch_processor()))
-    results.append(("训练函数", test_training_functions()))
-    results.append(("Embedding连接", test_embedding_connection()))
-    results.append(("run_training脚本", test_run_training_script()))
-    
-    # 总结
-    print(f"\n{'='*60}")
-    print("测试结果总结:")
-    print("=" * 60)
-    
-    for test_name, success in results:
-        status = "✅ 通过" if success else "❌ 失败"
-        print(f"{test_name}: {status}")
-    
-    total_passed = sum(1 for _, success in results if success)
-    print(f"\n总计: {total_passed}/{len(results)} 个测试通过")
-    
-    if total_passed == len(results):
-        print("🎉 所有测试都通过了!training目录的代码可以正常工作。")
-    elif total_passed >= len(results) - 1:
-        print("✅ 大部分测试通过,training目录的代码基本可以正常工作。")
-        print("   部分失败可能是由于服务未启动或配置问题。")
-    else:
-        print("⚠️  多个测试失败,请检查相关依赖和配置。")
-
-if __name__ == "__main__":
-    main() 

+ 0 - 235
test/test_vanna_combinations.py

@@ -1,235 +0,0 @@
-#!/usr/bin/env python3
-"""
-测试统一的Vanna组合类文件
-验证common/vanna_combinations.py中的功能
-"""
-
-def test_import_combinations():
-    """测试导入组合类"""
-    print("=== 测试导入组合类 ===")
-    
-    try:
-        from common.vanna_combinations import (
-            Vanna_Qwen_ChromaDB,
-            Vanna_DeepSeek_ChromaDB,
-            Vanna_Qwen_PGVector,
-            Vanna_DeepSeek_PGVector,
-            Vanna_Ollama_ChromaDB,
-            Vanna_Ollama_PGVector,
-            get_vanna_class,
-            list_available_combinations,
-            print_available_combinations
-        )
-        print("✅ 成功导入所有组合类和工具函数")
-        return True
-    except ImportError as e:
-        print(f"❌ 导入失败: {e}")
-        return False
-
-def test_get_vanna_class():
-    """测试get_vanna_class函数"""
-    print("\n=== 测试get_vanna_class函数 ===")
-    
-    try:
-        from common.vanna_combinations import get_vanna_class
-        
-        # 测试有效组合
-        test_cases = [
-            ("qwen", "chromadb"),
-            ("deepseek", "chromadb"),
-            ("qwen", "pgvector"),
-            ("deepseek", "pgvector"),
-            ("ollama", "chromadb"),
-            ("ollama", "pgvector"),
-        ]
-        
-        for llm_type, vector_db_type in test_cases:
-            try:
-                cls = get_vanna_class(llm_type, vector_db_type)
-                print(f"✅ {llm_type} + {vector_db_type} -> {cls.__name__}")
-            except Exception as e:
-                print(f"⚠️  {llm_type} + {vector_db_type} -> 错误: {e}")
-        
-        # 测试无效组合
-        print("\n测试无效组合:")
-        try:
-            get_vanna_class("invalid_llm", "chromadb")
-            print("❌ 应该抛出异常但没有")
-            return False
-        except ValueError:
-            print("✅ 正确处理无效LLM类型")
-        
-        try:
-            get_vanna_class("qwen", "invalid_db")
-            print("❌ 应该抛出异常但没有")
-            return False
-        except ValueError:
-            print("✅ 正确处理无效向量数据库类型")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        return False
-
-def test_list_available_combinations():
-    """测试列出可用组合"""
-    print("\n=== 测试列出可用组合 ===")
-    
-    try:
-        from common.vanna_combinations import list_available_combinations, print_available_combinations
-        
-        # 获取可用组合
-        combinations = list_available_combinations()
-        print(f"可用组合数据结构: {combinations}")
-        
-        # 打印可用组合
-        print("\n打印可用组合:")
-        print_available_combinations()
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_class_instantiation():
-    """测试类实例化(不需要实际服务)"""
-    print("\n=== 测试类实例化 ===")
-    
-    try:
-        from common.vanna_combinations import get_vanna_class
-        
-        # 测试ChromaDB组合(通常可用)
-        test_cases = [
-            ("qwen", "chromadb"),
-            ("deepseek", "chromadb"),
-        ]
-        
-        for llm_type, vector_db_type in test_cases:
-            try:
-                cls = get_vanna_class(llm_type, vector_db_type)
-                
-                # 尝试创建实例(使用空配置)
-                instance = cls(config={})
-                print(f"✅ 成功创建 {cls.__name__} 实例")
-                
-                # 检查实例类型
-                print(f"   实例类型: {type(instance)}")
-                print(f"   MRO: {[c.__name__ for c in type(instance).__mro__[:3]]}")
-                
-            except Exception as e:
-                print(f"⚠️  创建 {llm_type}+{vector_db_type} 实例失败: {e}")
-        
-        return True
-        
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def test_factory_integration():
-    """测试与工厂函数的集成"""
-    print("\n=== 测试与工厂函数的集成 ===")
-    
-    try:
-        import app_config
-        from common.utils import print_current_config
-        
-        # 保存原始配置
-        original_llm_type = app_config.LLM_MODEL_TYPE
-        original_embedding_type = app_config.EMBEDDING_MODEL_TYPE
-        original_vector_db = app_config.VECTOR_DB_NAME
-        original_llm_name = getattr(app_config, 'LLM_MODEL_NAME', None)
-        
-        try:
-            # 测试不同配置
-            test_configs = [
-                ("api", "api", "qwen", "chromadb"),
-                ("api", "api", "deepseek", "chromadb"),
-                ("ollama", "ollama", None, "chromadb"),
-            ]
-            
-            for llm_type, emb_type, llm_name, vector_db in test_configs:
-                print(f"\n--- 测试配置: LLM={llm_type}, EMB={emb_type}, MODEL={llm_name}, DB={vector_db} ---")
-                
-                # 设置配置
-                app_config.LLM_MODEL_TYPE = llm_type
-                app_config.EMBEDDING_MODEL_TYPE = emb_type
-                if llm_name:
-                    app_config.LLM_MODEL_NAME = llm_name
-                app_config.VECTOR_DB_NAME = vector_db
-                
-                # 打印当前配置
-                print_current_config()
-                
-                # 测试工厂函数(不实际创建实例,只测试类选择)
-                try:
-                    from vanna_llm_factory import create_vanna_instance
-                    from common.utils import get_current_model_info, is_using_ollama_llm
-                    from common.vanna_combinations import get_vanna_class
-                    
-                    model_info = get_current_model_info()
-                    
-                    if is_using_ollama_llm():
-                        selected_llm_type = "ollama"
-                    else:
-                        selected_llm_type = model_info["llm_model"].lower()
-                    
-                    selected_vector_db = model_info["vector_db"].lower()
-                    
-                    cls = get_vanna_class(selected_llm_type, selected_vector_db)
-                    print(f"✅ 工厂函数会选择: {cls.__name__}")
-                    
-                except Exception as e:
-                    print(f"⚠️  工厂函数测试失败: {e}")
-            
-            return True
-            
-        finally:
-            # 恢复原始配置
-            app_config.LLM_MODEL_TYPE = original_llm_type
-            app_config.EMBEDDING_MODEL_TYPE = original_embedding_type
-            app_config.VECTOR_DB_NAME = original_vector_db
-            if original_llm_name is not None:
-                app_config.LLM_MODEL_NAME = original_llm_name
-            
-    except Exception as e:
-        print(f"❌ 测试失败: {e}")
-        import traceback
-        traceback.print_exc()
-        return False
-
-def main():
-    """主测试函数"""
-    print("开始测试统一的Vanna组合类...")
-    print("=" * 60)
-    
-    results = []
-    
-    # 运行所有测试
-    results.append(("导入组合类", test_import_combinations()))
-    results.append(("get_vanna_class函数", test_get_vanna_class()))
-    results.append(("列出可用组合", test_list_available_combinations()))
-    results.append(("类实例化", test_class_instantiation()))
-    results.append(("工厂函数集成", test_factory_integration()))
-    
-    # 总结
-    print(f"\n{'='*60}")
-    print("测试结果总结:")
-    print("=" * 60)
-    
-    for test_name, success in results:
-        status = "✅ 通过" if success else "❌ 失败"
-        print(f"{test_name}: {status}")
-    
-    total_passed = sum(1 for _, success in results if success)
-    print(f"\n总计: {total_passed}/{len(results)} 个测试通过")
-    
-    if total_passed == len(results):
-        print("🎉 所有测试都通过了!统一组合类文件工作正常。")
-    else:
-        print("⚠️  部分测试失败,请检查相关依赖和配置。")
-
-if __name__ == "__main__":
-    main() 

+ 0 - 103
test/test_vanna_singleton.py

@@ -1,103 +0,0 @@
-"""
-测试 Vanna 单例模式是否正常工作
-"""
-import sys
-import os
-
-# 添加项目根目录到路径
-sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-
-def test_vanna_singleton():
-    """测试 Vanna 单例模式"""
-    from common.vanna_instance import get_vanna_instance, get_instance_status
-    
-    print("=" * 50)
-    print("测试 Vanna 单例模式")
-    print("=" * 50)
-    
-    # 检查初始状态
-    status = get_instance_status()
-    print(f"初始状态: {status}")
-    
-    # 第一次获取实例
-    print("\n第一次获取实例...")
-    instance1 = get_vanna_instance()
-    print(f"实例1 ID: {id(instance1)}")
-    print(f"实例1 类型: {type(instance1)}")
-    
-    # 第二次获取实例(应该是同一个)
-    print("\n第二次获取实例...")
-    instance2 = get_vanna_instance()
-    print(f"实例2 ID: {id(instance2)}")
-    print(f"实例2 类型: {type(instance2)}")
-    
-    # 验证是否为同一个实例
-    is_same = instance1 is instance2
-    print(f"\n实例是否相同: {is_same}")
-    
-    # 检查最终状态
-    final_status = get_instance_status()
-    print(f"最终状态: {final_status}")
-    
-    if is_same:
-        print("\n✅ 单例模式测试通过!")
-    else:
-        print("\n❌ 单例模式测试失败!")
-    
-    return is_same
-
-def test_import_from_tools():
-    """测试从工具文件导入是否正常"""
-    print("\n" + "=" * 50)
-    print("测试从工具文件导入")
-    print("=" * 50)
-    
-    try:
-        # 导入工具模块
-        from agent.tools.sql_generation import get_vanna_instance as gen_instance
-        from agent.tools.sql_execution import get_vanna_instance as exec_instance
-        from agent.tools.summary_generation import get_vanna_instance as sum_instance
-        
-        # 获取实例
-        instance_gen = gen_instance()
-        instance_exec = exec_instance()
-        instance_sum = sum_instance()
-        
-        print(f"SQL生成工具实例 ID: {id(instance_gen)}")
-        print(f"SQL执行工具实例 ID: {id(instance_exec)}")
-        print(f"摘要生成工具实例 ID: {id(instance_sum)}")
-        
-        # 验证是否都是同一个实例
-        all_same = (instance_gen is instance_exec) and (instance_exec is instance_sum)
-        
-        if all_same:
-            print("\n✅ 工具导入测试通过!所有工具使用同一个实例")
-        else:
-            print("\n❌ 工具导入测试失败!工具使用不同的实例")
-        
-        return all_same
-        
-    except Exception as e:
-        print(f"\n❌ 导入测试异常: {str(e)}")
-        return False
-
-if __name__ == "__main__":
-    try:
-        singleton_test = test_vanna_singleton()
-        import_test = test_import_from_tools()
-        
-        print("\n" + "=" * 50)
-        print("测试总结")
-        print("=" * 50)
-        print(f"单例模式测试: {'通过' if singleton_test else '失败'}")
-        print(f"工具导入测试: {'通过' if import_test else '失败'}")
-        
-        if singleton_test and import_test:
-            print("\n🎉 所有测试通过!Vanna 单例模式工作正常")
-        else:
-            print("\n⚠️  存在测试失败,请检查实现")
-            
-    except Exception as e:
-        print(f"测试执行异常: {str(e)}")
-        import traceback
-        traceback.print_exc() 

+ 0 - 4263
test/vanna_agent_test.ipynb

@@ -1,4263 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "# Vanna Agent Test - 基于 create_react_agent 的实现\n",
-        "\n",
-        "## 目标\n",
-        "使用 LangGraph 的 `create_react_agent()` 创建一个包含四个工具的智能Agent:\n",
-        "1. generate_sql - 生成SQL\n",
-        "2. valid_sql - 验证SQL\n",
-        "3. run_sql - 执行SQL\n",
-        "4. generate_summary - 生成摘要\n",
-        "\n",
-        "## 架构\n",
-        "- 三节点结构:Agent节点 → Tools节点 → END节点\n",
-        "- Agent自主决策是否需要查询数据库\n",
-        "- 对于常识问题直接用LLM回答"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 1. 环境准备和导入"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 1,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "项目根目录: c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\n",
-            "common目录存在: True\n",
-            "Python路径已更新: True\n",
-            "✅ 导入完成\n"
-          ]
-        }
-      ],
-      "source": [
-        "# 添加项目根目录到Python路径\n",
-        "import sys\n",
-        "import os\n",
-        "\n",
-        "# 方法1: 基于当前notebook文件位置计算项目根目录\n",
-        "current_dir = os.path.dirname(os.path.abspath(__file__)) if '__file__' in globals() else os.getcwd()\n",
-        "project_root = os.path.dirname(current_dir)  # test/ 的上一级就是项目根目录\n",
-        "\n",
-        "# 方法2: 备用方案,如果方法1失败\n",
-        "if not os.path.exists(os.path.join(project_root, 'common')):\n",
-        "    # 尝试当前工作目录的上一级\n",
-        "    project_root = os.path.dirname(os.getcwd())\n",
-        "\n",
-        "# 添加到Python路径\n",
-        "if project_root not in sys.path:\n",
-        "    sys.path.insert(0, project_root)\n",
-        "\n",
-        "print(f\"项目根目录: {project_root}\")\n",
-        "print(f\"common目录存在: {os.path.exists(os.path.join(project_root, 'common'))}\")\n",
-        "print(f\"Python路径已更新: {project_root in sys.path}\")\n",
-        "\n",
-        "# 基础导入\n",
-        "from typing import Dict, Any, List, Optional\n",
-        "import pandas as pd\n",
-        "import re\n",
-        "import json\n",
-        "from datetime import datetime\n",
-        "\n",
-        "# LangChain/LangGraph 导入\n",
-        "from langchain.tools import tool\n",
-        "from langchain_core.messages import HumanMessage, AIMessage, SystemMessage\n",
-        "from langgraph.prebuilt import create_react_agent\n",
-        "\n",
-        "# 项目导入\n",
-        "from common.vanna_instance import get_vanna_instance\n",
-        "from common.utils import get_current_llm_config\n",
-        "\n",
-        "print(\"✅ 导入完成\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 2,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ 配置参数已设置\n",
-            "最大工具调用次数: 10\n",
-            "最大返回行数: 200\n"
-          ]
-        }
-      ],
-      "source": [
-        "# ========== 可配置参数 ==========\n",
-        "\n",
-        "# 最大工具调用次数(防止无限循环)\n",
-        "MAX_TOOL_CALLS = 10\n",
-        "\n",
-        "# 最大返回行数\n",
-        "MAX_RETURN_ROWS = 200\n",
-        "\n",
-        "# 是否启用详细日志\n",
-        "VERBOSE = True\n",
-        "\n",
-        "# 数据库业务范围描述(请根据实际情况修改)\n",
-        "DATABASE_SCOPE = \"\"\"\n",
-        "=== 数据库业务范围 ===\n",
-        "本系统是高速公路服务区商业管理系统,包含以下业务数据:\n",
-        "\n",
-        "核心业务实体:\n",
-        "- 服务区(bss_service_area):服务区基础信息、位置、状态\n",
-        "- 档口/商铺(bss_business_day_data):档口信息、品类、营业数据\n",
-        "- 车流量(bss_car_day_count):按车型统计的日流量数据\n",
-        "- 公司信息(bss_company):服务区管理公司\n",
-        "\n",
-        "关键业务指标:\n",
-        "- 支付方式:微信支付、支付宝支付、现金支付等\n",
-        "- 营业数据:支付金额、订单数量、营业额、收入统计\n",
-        "- 车流统计:按车型的流量分析\n",
-        "- 经营分析:餐饮、小吃、便利店等品类收入\n",
-        "\n",
-        "时间范围:\n",
-        "- 数据更新到最近的营业日\n",
-        "- 历史数据可追溯到系统上线时间\n",
-        "\"\"\"\n",
-        "\n",
-        "print(\"✅ 配置参数已设置\")\n",
-        "print(f\"最大工具调用次数: {MAX_TOOL_CALLS}\")\n",
-        "print(f\"最大返回行数: {MAX_RETURN_ROWS}\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 3. 获取LLM实例\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 3,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔧 检测到模型: qwen-plus\n",
-            "🔧 为模型 qwen-plus 设置 enable_thinking=False\n",
-            "✅ 使用OpenAI兼容API(方法1:model_kwargs)\n"
-          ]
-        }
-      ],
-      "source": [
-        "def get_llm():\n",
-        "    \"\"\"获取兼容的LLM实例\"\"\"\n",
-        "    try:\n",
-        "        # 尝试使用OpenAI兼容的API\n",
-        "        from langchain_openai import ChatOpenAI\n",
-        "        from common.utils import get_current_llm_config\n",
-        "        \n",
-        "        llm_config = get_current_llm_config()\n",
-        "        \n",
-        "        if llm_config.get(\"base_url\") and llm_config.get(\"api_key\"):\n",
-        "            # 构建参数,确保thinking功能正确设置\n",
-        "            model_name = llm_config.get(\"model\", \"\").lower()\n",
-        "            print(f\"🔧 检测到模型: {model_name}\")\n",
-        "            \n",
-        "            # 方法1:尝试使用model_kwargs传递参数\n",
-        "            model_kwargs = {}\n",
-        "            if \"deepseek\" in model_name or \"qianwen\" in model_name or \"qwen\" in model_name:\n",
-        "                model_kwargs[\"enable_thinking\"] = False\n",
-        "                print(f\"🔧 为模型 {model_name} 设置 enable_thinking=False\")\n",
-        "            \n",
-        "            llm = ChatOpenAI(\n",
-        "                base_url=llm_config.get(\"base_url\"),\n",
-        "                api_key=llm_config.get(\"api_key\"),\n",
-        "                model=llm_config.get(\"model\"),\n",
-        "                temperature=llm_config.get(\"temperature\", 0.7),\n",
-        "                model_kwargs=model_kwargs\n",
-        "            )\n",
-        "            print(\"✅ 使用OpenAI兼容API(方法1:model_kwargs)\")\n",
-        "            return llm\n",
-        "    except Exception as e:\n",
-        "        print(f\"⚠️ OpenAI API方法1失败: {e}\")\n",
-        "        \n",
-        "        # 方法2:尝试使用extra_body\n",
-        "        try:\n",
-        "            from langchain_openai import ChatOpenAI\n",
-        "            from common.utils import get_current_llm_config\n",
-        "            \n",
-        "            llm_config = get_current_llm_config()\n",
-        "            \n",
-        "            if llm_config.get(\"base_url\") and llm_config.get(\"api_key\"):\n",
-        "                model_name = llm_config.get(\"model\", \"\").lower()\n",
-        "                \n",
-        "                llm = ChatOpenAI(\n",
-        "                    base_url=llm_config.get(\"base_url\"),\n",
-        "                    api_key=llm_config.get(\"api_key\"),\n",
-        "                    model=llm_config.get(\"model\"),\n",
-        "                    temperature=llm_config.get(\"temperature\", 0.7),\n",
-        "                    extra_body={\"enable_thinking\": False}\n",
-        "                )\n",
-        "                print(\"✅ 使用OpenAI兼容API(方法2:extra_body)\")\n",
-        "                return llm\n",
-        "        except Exception as e2:\n",
-        "            print(f\"⚠️ OpenAI API方法2失败: {e2}\")\n",
-        "    \n",
-        "    # 回退方案:创建一个简单的包装器\n",
-        "    from langchain_core.language_models import BaseChatModel\n",
-        "    from langchain_core.messages import BaseMessage, AIMessage\n",
-        "    from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "    \n",
-        "    class VannaLLMWrapper(BaseChatModel):\n",
-        "        \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "        \n",
-        "        def __init__(self):\n",
-        "            super().__init__()\n",
-        "            self.vn = get_vanna_instance()\n",
-        "        \n",
-        "        def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "            # 构建提示词\n",
-        "            prompt = \"\"\n",
-        "            for msg in messages:\n",
-        "                if isinstance(msg, SystemMessage):\n",
-        "                    prompt = msg.content + \"\\n\\n\"\n",
-        "                elif isinstance(msg, HumanMessage):\n",
-        "                    prompt += f\"用户: {msg.content}\\n\"\n",
-        "                elif isinstance(msg, AIMessage):\n",
-        "                    prompt += f\"助手: {msg.content}\\n\"\n",
-        "            \n",
-        "            # 调用Vanna,确保禁用thinking\n",
-        "            try:\n",
-        "                # 直接调用项目中的LLM实例,它应该已经正确配置了thinking参数\n",
-        "                response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            except TypeError:\n",
-        "                # 如果不支持enable_thinking参数,使用默认调用\n",
-        "                try:\n",
-        "                    response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                except TypeError:\n",
-        "                    # 最后的备用方案\n",
-        "                    response = self.vn.chat_with_llm(question=prompt)\n",
-        "            \n",
-        "            # 返回结果\n",
-        "            message = AIMessage(content=response)\n",
-        "            generation = ChatGeneration(message=message)\n",
-        "            return ChatResult(generations=[generation])\n",
-        "        \n",
-        "        @property\n",
-        "        def _llm_type(self) -> str:\n",
-        "            return \"vanna_wrapper\"\n",
-        "    \n",
-        "    print(\"✅ 使用Vanna LLM包装器\")\n",
-        "    return VannaLLMWrapper()\n",
-        "\n",
-        "# 获取LLM实例\n",
-        "llm = get_llm()\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 4,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:40:13,350 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:40:23,136 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:40:23,137 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:40:23,138 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:40:23,139 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:40:23,139 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:40:23,140 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:40:23,142 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:40:23,142 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:40:23,143 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:40:23,144 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:40:23,393 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:40:23,394 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:40:23,395 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:40:23,396 - vanna.BaseLLMChat - INFO -   model: qwen-plus\n",
-            "2025-07-08 09:40:23,397 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:40:23,398 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:40:23,398 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO -   stream: False\n",
-            "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:40:23,400 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:40:23,400 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000024E2E5135C0>\n",
-            "2025-07-08 09:40:23,401 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:40:23,402 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:40:24,662 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:40:24,663 - app.VannaSingleton - INFO - Vanna 实例创建成功\n",
-            "2025-07-08 09:40:24,668 - vanna.BaseLLMChat - INFO - \n",
-            "Using model qwen-plus for 18.5 tokens (approx)\n",
-            "2025-07-08 09:40:24,668 - vanna.BaseLLMChat - INFO - Enable thinking: False, Stream mode: False\n",
-            "2025-07-08 09:40:24,669 - vanna.BaseLLMChat - INFO - 使用非流式处理模式\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ 使用Vanna LLM包装器\n",
-            "\n",
-            "🧪 测试LLM基础功能...\n",
-            "🔧 成功禁用thinking和stream\n",
-            "✅ LLM测试成功: 测试成功\n"
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    # 使用类配置允许额外字段\n",
-        "    model_config = {\"extra\": \"allow\"}\n",
-        "    \n",
-        "    def __init__(self, **kwargs):\n",
-        "        super().__init__(**kwargs)\n",
-        "        # 在初始化后设置vn实例\n",
-        "        object.__setattr__(self, 'vn', get_vanna_instance())\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 5,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 55\u001b[39m\n\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
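The traceback preserved above (`ValueError: "VannaLLMWrapper" object has no field "vn"`) is a Pydantic error rather than a Vanna problem: LangChain's `BaseChatModel` is a Pydantic model, so any attribute assigned in `__init__` must first be declared as a field. Below is a minimal sketch of a corrected wrapper; it is not part of the original notebook, and it assumes `get_vanna_instance()` and `chat_with_llm(question=..., stream=...)` behave as they do in the deleted cell.

```python
# Hedged sketch: declaring `vn` as a model field lets `self.vn = ...` pass
# Pydantic's attribute check. `get_vanna_instance()` is assumed to be the same
# project helper imported earlier in the deleted notebook.
from typing import Any, List, Optional

from langchain_core.language_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult


class VannaLLMWrapper(BaseChatModel):
    """LangChain wrapper around a Vanna instance (corrected field declaration)."""

    vn: Any = None  # declared field, so Pydantic allows the assignment below

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.vn = get_vanna_instance()  # assumed project helper, as in the deleted cell

    def _generate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager=None,
        **kwargs,
    ) -> ChatResult:
        # Flatten the chat history into a single prompt, as the original cell did.
        prompt = "\n".join(str(m.content) for m in messages)
        # chat_with_llm signature assumed from the deleted cell; streaming disabled.
        response = self.vn.chat_with_llm(question=prompt, stream=False)
        message = AIMessage(content=response)
        return ChatResult(generations=[ChatGeneration(message=message)])

    @property
    def _llm_type(self) -> str:
        return "vanna_wrapper"
```

Declaring `vn: Any = None` (or using `pydantic.PrivateAttr`) is the usual remedy; setting `model_config` to allow extra attributes would also silence the error, but is looser than declaring the field explicitly.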
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
-          ]
-        },
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "⚠️  检测到thinking参数问题,直接使用Vanna包装器...\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
-            "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
-            "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
-            "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
-            "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
-            "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
-            "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO -   api_key: sk-db68e37f00974031935395315bfe07f0\n",
-            "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO -   base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
-            "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO -   model: qwen3-235b-a22b\n",
-            "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO -   allow_llm_to_see_data: True\n",
-            "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO -   temperature: 0.6\n",
-            "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO -   n_results: 6\n",
-            "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO -   language: Chinese\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   stream: True\n",
-            "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO -   enable_thinking: False\n",
-            "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO -   connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
-            "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO -   embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
-            "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
-            "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
-            "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
-            "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
-          ]
-        },
-        {
-          "ename": "ValueError",
-          "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
-          "output_type": "error",
-          "traceback": [
-            "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
-            "\u001b[31mValueError\u001b[39m                                Traceback (most recent call last)\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
-            "\u001b[32m     52\u001b[39m         \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
-            "\u001b[32m     54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
-            "\u001b[32m     56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
-            "\u001b[32m     58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
-            "\n",
-            "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
-            "\u001b[32m     15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
-            "\u001b[32m     16\u001b[39m     \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
-            "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m     \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m    995\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
-            "\u001b[32m    996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
-            "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m    998\u001b[39m     setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)  \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
-            "\u001b[32m    999\u001b[39m     \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
-            "\n",
-            "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
-            "\u001b[32m   1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
-            "\u001b[32m   1042\u001b[39m     \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
-            "\u001b[32m   1043\u001b[39m         \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
-            "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m         \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
-            "\u001b[32m   1045\u001b[39m     \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
-            "\u001b[32m   1046\u001b[39m         \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
-            "\u001b[32m   1047\u001b[39m         \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
-            "\n",
-            "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "print(\"⚠️  检测到thinking参数问题,直接使用Vanna包装器...\")\n",
-        "\n",
-        "# 直接创建Vanna包装器\n",
-        "from langchain_core.language_models import BaseChatModel\n",
-        "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
-        "from langchain_core.outputs import ChatResult, ChatGeneration\n",
-        "\n",
-        "class VannaLLMWrapper(BaseChatModel):\n",
-        "    \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
-        "    \n",
-        "    def __init__(self):\n",
-        "        super().__init__()\n",
-        "        self.vn = get_vanna_instance()\n",
-        "    \n",
-        "    def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
-        "        # 构建提示词\n",
-        "        prompt = \"\"\n",
-        "        for msg in messages:\n",
-        "            if isinstance(msg, SystemMessage):\n",
-        "                prompt = msg.content + \"\\n\\n\"\n",
-        "            elif isinstance(msg, HumanMessage):\n",
-        "                prompt += f\"用户: {msg.content}\\n\"\n",
-        "            elif isinstance(msg, AIMessage):\n",
-        "                prompt += f\"助手: {msg.content}\\n\"\n",
-        "        \n",
-        "        # 调用Vanna,确保禁用thinking和stream\n",
-        "        try:\n",
-        "            # 尝试禁用thinking和stream\n",
-        "            response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
-        "            print(\"🔧 成功禁用thinking和stream\")\n",
-        "        except TypeError:\n",
-        "            try:\n",
-        "                # 尝试只禁用stream\n",
-        "                response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
-        "                print(\"🔧 成功禁用stream\")\n",
-        "            except TypeError:\n",
-        "                # 最后的备用方案\n",
-        "                response = self.vn.chat_with_llm(question=prompt)\n",
-        "                print(\"🔧 使用默认调用\")\n",
-        "        \n",
-        "        # 返回结果\n",
-        "        message = AIMessage(content=response)\n",
-        "        generation = ChatGeneration(message=message)\n",
-        "        return ChatResult(generations=[generation])\n",
-        "    \n",
-        "    @property\n",
-        "    def _llm_type(self) -> str:\n",
-        "        return \"vanna_wrapper\"\n",
-        "\n",
-        "# 创建LLM实例\n",
-        "llm = VannaLLMWrapper()\n",
-        "print(\"✅ 使用Vanna LLM包装器\")\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"检查Vanna实例是否正常工作...\")\n",
-        "    \n",
-        "    # 直接测试Vanna实例\n",
-        "    try:\n",
-        "        vn = get_vanna_instance()\n",
-        "        direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
-        "        print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
-        "    except Exception as e2:\n",
-        "        print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
-        "        print(\"请检查您的LLM配置和网络连接\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "🔧 检测到模型: qwen3-235b-a22b\n",
-            "🔧 为模型 qwen3-235b-a22b 设置 enable_thinking=False\n",
-            "✅ 使用OpenAI兼容API(方法1:model_kwargs)\n",
-            "\n",
-            "🧪 测试LLM基础功能...\n",
-            "❌ LLM测试失败: Completions.create() got an unexpected keyword argument 'enable_thinking'\n",
-            "如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\n"
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 重新获取LLM实例\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "llm = get_llm()\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    from langchain_core.messages import HumanMessage\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\")\n"
-      ]
-    },
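-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "## 3.1.1 备选思路:通过 extra_body 传递 enable_thinking(示意代码,未在本项目中验证)\n",
-        "\n",
-        "# 上面的报错说明 enable_thinking 被当作 Completions.create() 的顶层关键字参数传入而被拒绝。\n",
-        "# OpenAI SDK 的 create() 提供 extra_body 参数,其内容会合并进请求体,\n",
-        "# DashScope 兼容模式的私有参数(如 enable_thinking)可以走这条路。\n",
-        "# 以下仅为示意:模型名与 base_url 沿用上文日志中的配置,api_key 为占位符,需自行替换;\n",
-        "# 若所用 langchain_openai 版本不支持 extra_body 参数,可改用 model_kwargs={\"extra_body\": {...}}。\n",
-        "from langchain_openai import ChatOpenAI\n",
-        "from langchain_core.messages import HumanMessage\n",
-        "\n",
-        "llm_alt = ChatOpenAI(\n",
-        "    model=\"qwen3-235b-a22b\",\n",
-        "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
-        "    api_key=\"YOUR_API_KEY\",  # 占位符,请替换为实际的 API Key\n",
-        "    temperature=0.6,\n",
-        "    extra_body={\"enable_thinking\": False},  # 进入请求体,而不是 create() 的关键字参数\n",
-        ")\n",
-        "\n",
-        "print(llm_alt.invoke([HumanMessage(content=\"请回复'测试成功'\")]).content)\n"
-      ]
-    },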
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔄 重新创建LLM实例...\n",
-            "🔧 检测到模型: qwen3-235b-a22b\n",
-            "🔧 为模型 qwen3-235b-a22b 设置 enable_thinking=False\n",
-            "✅ 使用OpenAI兼容API(方法1:model_kwargs)\n",
-            "\n",
-            "🧪 测试LLM基础功能...\n",
-            "❌ LLM测试失败: Completions.create() got an unexpected keyword argument 'enable_thinking'\n",
-            "如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\n"
-          ]
-        }
-      ],
-      "source": [
-        "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
-        "\n",
-        "# 重新获取LLM实例\n",
-        "print(\"🔄 重新创建LLM实例...\")\n",
-        "llm = get_llm()\n",
-        "\n",
-        "# 测试LLM基础功能\n",
-        "print(\"\\n🧪 测试LLM基础功能...\")\n",
-        "try:\n",
-        "    from langchain_core.messages import HumanMessage\n",
-        "    test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
-        "    print(f\"✅ LLM测试成功: {test_response.content}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"❌ LLM测试失败: {e}\")\n",
-        "    print(\"如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 4. 定义工具函数\n",
-        "### 4.1 generate_sql 工具"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 11,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ generate_sql 工具已定义\n"
-          ]
-        }
-      ],
-      "source": [
-        "@tool\n",
-        "def generate_sql(question: str) -> Dict[str, Any]:\n",
-        "    \"\"\"\n",
-        "    将自然语言问题转换为SQL查询。\n",
-        "    \n",
-        "    Args:\n",
-        "        question: 需要转换为SQL的自然语言问题\n",
-        "    \n",
-        "    Returns:\n",
-        "        包含SQL生成结果的字典\n",
-        "    \"\"\"\n",
-        "    try:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"🔧 [generate_sql] 输入问题: {question}\")\n",
-        "        \n",
-        "        vn = get_vanna_instance()\n",
-        "        sql = vn.generate_sql(question=question, allow_llm_to_see_data=True)\n",
-        "        \n",
-        "        if sql is None:\n",
-        "            # 检查是否有解释性文本\n",
-        "            explanation = getattr(vn, 'last_llm_explanation', None)\n",
-        "            if explanation:\n",
-        "                return {\n",
-        "                    \"success\": False,\n",
-        "                    \"sql\": None,\n",
-        "                    \"error\": explanation,\n",
-        "                    \"error_type\": \"no_relevant_data\"\n",
-        "                }\n",
-        "            else:\n",
-        "                return {\n",
-        "                    \"success\": False,\n",
-        "                    \"sql\": None,\n",
-        "                    \"error\": \"无法生成SQL查询,可能是问题描述不够明确或数据表结构不匹配\",\n",
-        "                    \"error_type\": \"generation_failed\"\n",
-        "                }\n",
-        "        \n",
-        "        if VERBOSE:\n",
-        "            print(f\"✅ [generate_sql] 生成的SQL: {sql}\")\n",
-        "        \n",
-        "        return {\n",
-        "            \"success\": True,\n",
-        "            \"sql\": sql,\n",
-        "            \"error\": None\n",
-        "        }\n",
-        "        \n",
-        "    except Exception as e:\n",
-        "        return {\n",
-        "            \"success\": False,\n",
-        "            \"sql\": None,\n",
-        "            \"error\": f\"SQL生成异常: {str(e)}\",\n",
-        "            \"error_type\": \"exception\"\n",
-        "        }\n",
-        "\n",
-        "print(\"✅ generate_sql 工具已定义\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 4.2 valid_sql 工具"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 12,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ valid_sql 工具已定义\n"
-          ]
-        }
-      ],
-      "source": [
-        "@tool\n",
-        "def valid_sql(sql: str) -> Dict[str, Any]:\n",
-        "    \"\"\"\n",
-        "    验证SQL语句的正确性。\n",
-        "    \n",
-        "    Args:\n",
-        "        sql: 要验证的SQL语句\n",
-        "    \n",
-        "    Returns:\n",
-        "        包含验证结果的字典\n",
-        "    \"\"\"\n",
-        "    try:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"🔧 [valid_sql] 验证SQL: {sql[:100]}...\")\n",
-        "        \n",
-        "        # 1. 基础格式检查\n",
-        "        if not sql or not sql.strip():\n",
-        "            return {\n",
-        "                \"success\": True,\n",
-        "                \"valid\": False,\n",
-        "                \"error\": \"SQL语句为空\"\n",
-        "            }\n",
-        "        \n",
-        "        # 2. 禁止词检查\n",
-        "        forbidden_operations = ['UPDATE', 'DELETE', 'DROP', 'ALTER', 'INSERT']\n",
-        "        sql_upper = sql.upper().strip()\n",
-        "        \n",
-        "        for operation in forbidden_operations:\n",
-        "            if sql_upper.startswith(operation):\n",
-        "                return {\n",
-        "                    \"success\": True,\n",
-        "                    \"valid\": False,\n",
-        "                    \"error\": f\"不允许的操作: {operation}。本系统只支持查询操作(SELECT)。\"\n",
-        "                }\n",
-        "        \n",
-        "        # 3. 语法验证(使用EXPLAIN)\n",
-        "        vn = get_vanna_instance()\n",
-        "        explain_sql = f\"EXPLAIN {sql}\"\n",
-        "        \n",
-        "        try:\n",
-        "            result = vn.run_sql(explain_sql)\n",
-        "            if result is not None:\n",
-        "                if VERBOSE:\n",
-        "                    print(\"✅ [valid_sql] SQL验证通过\")\n",
-        "                return {\n",
-        "                    \"success\": True,\n",
-        "                    \"valid\": True,\n",
-        "                    \"error\": None\n",
-        "                }\n",
-        "            else:\n",
-        "                return {\n",
-        "                    \"success\": True,\n",
-        "                    \"valid\": False,\n",
-        "                    \"error\": \"SQL语法验证失败\"\n",
-        "                }\n",
-        "        except Exception as e:\n",
-        "            error_msg = str(e)\n",
-        "            if VERBOSE:\n",
-        "                print(f\"❌ [valid_sql] 验证失败: {error_msg}\")\n",
-        "            return {\n",
-        "                \"success\": True,\n",
-        "                \"valid\": False,\n",
-        "                \"error\": f\"SQL语法错误: {error_msg}\"\n",
-        "            }\n",
-        "            \n",
-        "    except Exception as e:\n",
-        "        return {\n",
-        "            \"success\": False,\n",
-        "            \"valid\": False,\n",
-        "            \"error\": f\"验证过程异常: {str(e)}\"\n",
-        "        }\n",
-        "\n",
-        "print(\"✅ valid_sql 工具已定义\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 4.3 run_sql 工具\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 13,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ run_sql 工具已定义\n"
-          ]
-        }
-      ],
-      "source": [
-        "@tool\n",
-        "def run_sql(sql: str) -> Dict[str, Any]:\n",
-        "    \"\"\"\n",
-        "    执行SQL查询并返回结果。\n",
-        "    \n",
-        "    Args:\n",
-        "        sql: 要执行的SQL查询语句\n",
-        "    \n",
-        "    Returns:\n",
-        "        包含查询结果的字典\n",
-        "    \"\"\"\n",
-        "    try:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"🔧 [run_sql] 执行SQL: {sql[:100]}...\")\n",
-        "        \n",
-        "        vn = get_vanna_instance()\n",
-        "        df = vn.run_sql(sql)\n",
-        "        \n",
-        "        if df is None:\n",
-        "            return {\n",
-        "                \"success\": False,\n",
-        "                \"data\": None,\n",
-        "                \"error\": \"SQL执行返回空结果\",\n",
-        "                \"row_count\": 0\n",
-        "            }\n",
-        "        \n",
-        "        if not isinstance(df, pd.DataFrame):\n",
-        "            return {\n",
-        "                \"success\": False,\n",
-        "                \"data\": None,\n",
-        "                \"error\": f\"SQL执行返回非DataFrame类型: {type(df)}\",\n",
-        "                \"row_count\": 0\n",
-        "            }\n",
-        "        \n",
-        "        if df.empty:\n",
-        "            return {\n",
-        "                \"success\": True,\n",
-        "                \"data\": [],\n",
-        "                \"columns\": list(df.columns),\n",
-        "                \"row_count\": 0,\n",
-        "                \"message\": \"查询执行成功,但没有找到符合条件的数据\"\n",
-        "            }\n",
-        "        \n",
-        "        # 处理数据结果\n",
-        "        total_rows = len(df)\n",
-        "        limited_df = df.head(MAX_RETURN_ROWS)\n",
-        "        \n",
-        "        # 转换为字典格式\n",
-        "        rows = limited_df.to_dict(orient=\"records\")\n",
-        "        columns = list(df.columns)\n",
-        "        \n",
-        "        if VERBOSE:\n",
-        "            print(f\"✅ [run_sql] 查询成功,返回 {total_rows} 行数据\")\n",
-        "        \n",
-        "        result = {\n",
-        "            \"success\": True,\n",
-        "            \"data\": rows,\n",
-        "            \"columns\": columns,\n",
-        "            \"row_count\": len(rows),\n",
-        "            \"total_row_count\": total_rows,\n",
-        "            \"is_limited\": total_rows > MAX_RETURN_ROWS\n",
-        "        }\n",
-        "        \n",
-        "        if total_rows > MAX_RETURN_ROWS:\n",
-        "            result[\"message\"] = f\"共 {total_rows} 行数据,已限制显示前 {MAX_RETURN_ROWS} 行\"\n",
-        "        \n",
-        "        return result\n",
-        "        \n",
-        "    except Exception as e:\n",
-        "        error_msg = str(e)\n",
-        "        if VERBOSE:\n",
-        "            print(f\"❌ [run_sql] 执行失败: {error_msg}\")\n",
-        "        \n",
-        "        return {\n",
-        "            \"success\": False,\n",
-        "            \"data\": None,\n",
-        "            \"error\": f\"SQL执行失败: {error_msg}\",\n",
-        "            \"row_count\": 0\n",
-        "        }\n",
-        "\n",
-        "print(\"✅ run_sql 工具已定义\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 4.4 generate_summary 工具\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 14,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ generate_summary 工具已定义\n"
-          ]
-        }
-      ],
-      "source": [
-        "@tool\n",
-        "def generate_summary(question: str, query_result: str) -> Dict[str, Any]:\n",
-        "    \"\"\"\n",
-        "    为查询结果生成自然语言摘要。\n",
-        "    \n",
-        "    Args:\n",
-        "        question: 原始问题\n",
-        "        query_result: 查询结果的JSON字符串\n",
-        "    \n",
-        "    Returns:\n",
-        "        包含摘要结果的字典\n",
-        "    \"\"\"\n",
-        "    try:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"🔧 [generate_summary] 为问题生成摘要: {question}\")\n",
-        "        \n",
-        "        # 解析查询结果\n",
-        "        try:\n",
-        "            result_data = json.loads(query_result) if isinstance(query_result, str) else query_result\n",
-        "        except:\n",
-        "            result_data = {\"error\": \"无法解析查询结果\"}\n",
-        "        \n",
-        "        # 检查是否有数据\n",
-        "        if not result_data.get(\"success\") or not result_data.get(\"data\"):\n",
-        "            return {\n",
-        "                \"success\": True,\n",
-        "                \"summary\": \"查询执行完成,但没有找到符合条件的数据。\"\n",
-        "            }\n",
-        "        \n",
-        "        # 重构DataFrame用于摘要生成\n",
-        "        rows = result_data.get(\"data\", [])\n",
-        "        columns = result_data.get(\"columns\", [])\n",
-        "        \n",
-        "        if rows and columns:\n",
-        "            df = pd.DataFrame(rows, columns=columns)\n",
-        "            \n",
-        "            # 调用Vanna生成摘要\n",
-        "            vn = get_vanna_instance()\n",
-        "            summary = vn.generate_summary(question=question, df=df)\n",
-        "            \n",
-        "            if summary:\n",
-        "                if VERBOSE:\n",
-        "                    print(f\"✅ [generate_summary] 摘要生成成功\")\n",
-        "                return {\n",
-        "                    \"success\": True,\n",
-        "                    \"summary\": summary\n",
-        "                }\n",
-        "        \n",
-        "        # 生成默认摘要\n",
-        "        row_count = result_data.get(\"row_count\", 0)\n",
-        "        summary = f\"根据您的问题『{question}』,查询返回了 {row_count} 条记录。\"\n",
-        "        \n",
-        "        if columns:\n",
-        "            summary += f\"数据包含以下字段:{', '.join(columns[:5])}\" \n",
-        "            if len(columns) > 5:\n",
-        "                summary += f\"等{len(columns)}个字段。\"\n",
-        "            else:\n",
-        "                summary += \"。\"\n",
-        "        \n",
-        "        return {\n",
-        "            \"success\": True,\n",
-        "            \"summary\": summary\n",
-        "        }\n",
-        "        \n",
-        "    except Exception as e:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"❌ [generate_summary] 生成摘要失败: {str(e)}\")\n",
-        "        \n",
-        "        return {\n",
-        "            \"success\": True,\n",
-        "            \"summary\": f\"查询执行完成,共返回数据。\"\n",
-        "        }\n",
-        "\n",
-        "print(\"✅ generate_summary 工具已定义\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 5. 创建 ReAct Agent\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 15,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "✅ 工具列表已准备\n",
-            "可用工具: ['generate_sql', 'valid_sql', 'run_sql', 'generate_summary']\n"
-          ]
-        }
-      ],
-      "source": [
-        "# 系统提示词\n",
-        "SYSTEM_MESSAGE = f\"\"\"\n",
-        "你是一个智能数据查询助手,可以帮助用户查询数据库信息,也可以回答一般性问题。\n",
-        "\n",
-        "{DATABASE_SCOPE}\n",
-        "\n",
-        "=== 工作流程 ===\n",
-        "1. 判断问题类型:\n",
-        "   - 如果问题涉及上述业务数据,使用工具查询数据库\n",
-        "   - 如果是常识性问题(如\"荔枝几月上市\"),直接用你的知识回答\n",
-        "\n",
-        "2. 数据库查询流程:\n",
-        "   a) 使用 generate_sql 生成SQL\n",
-        "   b) 使用 valid_sql 验证SQL\n",
-        "   c) 如果验证通过,使用 run_sql 执行SQL\n",
-        "   d) 使用 generate_summary 生成结果摘要\n",
-        "\n",
-        "3. 错误处理:\n",
-        "   - 如果无法生成SQL,说明数据库中可能没有相关数据\n",
-        "   - 对于边界问题,可以先尝试查询,如果失败则用常识回答\n",
-        "\n",
-        "4. 注意事项:\n",
-        "   - 每个工具调用都要检查返回的 success 字段\n",
-        "   - 如果工具调用失败,根据 error 信息决定下一步\n",
-        "   - 避免重复调用相同的工具超过2次\n",
-        "\n",
-        "请根据用户问题,智能选择合适的处理方式。\n",
-        "\"\"\"\n",
-        "\n",
-        "# 创建工具列表\n",
-        "tools = [generate_sql, valid_sql, run_sql, generate_summary]\n",
-        "\n",
-        "print(\"✅ 工具列表已准备\")\n",
-        "print(f\"可用工具: {[tool.name for tool in tools]}\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "## 5.0 重新创建Agent(使用修复后的LLM)\n",
-        "\n",
-        "# 重新创建Agent\n",
-        "print(\"🔄 重新创建Agent...\")\n",
-        "\n",
-        "agent = None\n",
-        "success_method = None\n",
-        "\n",
-        "# 基础创建(系统消息将在调用时处理)\n",
-        "try:\n",
-        "    agent = create_react_agent(\n",
-        "        llm,\n",
-        "        tools=tools\n",
-        "    )\n",
-        "    success_method = \"基础创建(系统消息将在调用时处理)\"\n",
-        "    print(\"✅ ReAct Agent 重新创建成功\")\n",
-        "    print(\"⚠️  注意:系统消息将在每次调用时手动添加\")\n",
-        "except Exception as e3:\n",
-        "    print(f\"❌ Agent创建失败: {e3}\")\n",
-        "    raise Exception(\"无法创建 ReAct Agent\")\n",
-        "\n",
-        "print(f\"🎯 成功方法: {success_method}\")\n",
-        "print(f\"📋 Agent 类型: {type(agent)}\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 17,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "方法1失败: create_react_agent() got an unexpected keyword argument 'system_message'\n",
-            "方法2失败: create_react_agent() got an unexpected keyword argument 'state_modifier'\n",
-            "✅ ReAct Agent 创建成功(方法3:基础创建)\n",
-            "⚠️  注意:系统消息将在每次调用时手动添加\n",
-            "🎯 成功方法: 基础创建(系统消息将在调用时处理)\n",
-            "📋 Agent 类型: <class 'langgraph.graph.state.CompiledStateGraph'>\n"
-          ]
-        }
-      ],
-      "source": [
-        "# 创建 ReAct Agent\n",
-        "# 尝试多种兼容的方式来设置系统消息\n",
-        "\n",
-        "agent = None\n",
-        "success_method = None\n",
-        "\n",
-        "# 方法1:尝试使用 system_message 参数\n",
-        "try:\n",
-        "    agent = create_react_agent(\n",
-        "        llm,\n",
-        "        tools=tools,\n",
-        "        system_message=SYSTEM_MESSAGE\n",
-        "    )\n",
-        "    success_method = \"system_message 参数\"\n",
-        "    print(\"✅ ReAct Agent 创建成功(方法1:system_message 参数)\")\n",
-        "except Exception as e:\n",
-        "    if VERBOSE:\n",
-        "        print(f\"方法1失败: {e}\")\n",
-        "    \n",
-        "    # 方法2:尝试使用 state_modifier 参数\n",
-        "    try:\n",
-        "        agent = create_react_agent(\n",
-        "            llm,\n",
-        "            tools=tools,\n",
-        "            state_modifier=SYSTEM_MESSAGE\n",
-        "        )\n",
-        "        success_method = \"state_modifier 参数\"\n",
-        "        print(\"✅ ReAct Agent 创建成功(方法2:state_modifier 参数)\")\n",
-        "    except Exception as e2:\n",
-        "        if VERBOSE:\n",
-        "            print(f\"方法2失败: {e2}\")\n",
-        "        \n",
-        "        # 方法3:基础创建(将在调用时处理系统消息)\n",
-        "        try:\n",
-        "            agent = create_react_agent(\n",
-        "                llm,\n",
-        "                tools=tools\n",
-        "            )\n",
-        "            success_method = \"基础创建(系统消息将在调用时处理)\"\n",
-        "            print(\"✅ ReAct Agent 创建成功(方法3:基础创建)\")\n",
-        "            print(\"⚠️  注意:系统消息将在每次调用时手动添加\")\n",
-        "        except Exception as e3:\n",
-        "            print(f\"❌ 所有方法都失败了:\")\n",
-        "            print(f\"  方法1: {e}\")\n",
-        "            print(f\"  方法2: {e2}\")\n",
-        "            print(f\"  方法3: {e3}\")\n",
-        "            raise Exception(\"无法创建 ReAct Agent\")\n",
-        "\n",
-        "print(f\"🎯 成功方法: {success_method}\")\n",
-        "print(f\"📋 Agent 类型: {type(agent)}\")\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": 18,
-      "metadata": {},
-      "outputs": [
-        {
-          "name": "stdout",
-          "output_type": "stream",
-          "text": [
-            "🔍 Agent 状态验证:\n",
-            "   - Agent 实例: True\n",
-            "   - Agent 类型: <class 'langgraph.graph.state.CompiledStateGraph'>\n",
-            "   - 创建方法: 基础创建(系统消息将在调用时处理)\n",
-            "   - 可用工具: 4 个\n",
-            "   - 工具列表: ['generate_sql', 'valid_sql', 'run_sql', 'generate_summary']\n",
-            "   - 基础调用测试: 尝试中...\n",
-            "   - 基础调用测试: ❌ 失败 (Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': 'parameter.enable_thinking must be set to false for non-streaming calls', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-9f3d39f8-df01-9096-a0ce-c11c829b0b24', 'request_id': '9f3d39f8-df01-9096-a0ce-c11c829b0b24'})\n",
-            "   - 详细错误:\n",
-            "\n",
-            "==================================================\n",
-            "⚠️  Agent 验证失败,请检查配置\n",
-            "==================================================\n"
-          ]
-        },
-        {
-          "name": "stderr",
-          "output_type": "stream",
-          "text": [
-            "Traceback (most recent call last):\n",
-            "  File \"C:\\Users\\PaulWang\\AppData\\Local\\Temp\\ipykernel_40896\\2734479170.py\", line 24, in verify_agent\n",
-            "    result = agent.invoke({\"messages\": simple_messages}, test_config)\n",
-            "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\__init__.py\", line 2719, in invoke\n",
-            "    for chunk in self.stream(\n",
-            "                 ^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\__init__.py\", line 2436, in stream\n",
-            "    for _ in runner.tick(\n",
-            "             ^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\runner.py\", line 161, in tick\n",
-            "    run_with_retry(\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\retry.py\", line 40, in run_with_retry\n",
-            "    return task.proc.invoke(task.input, config)\n",
-            "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 623, in invoke\n",
-            "    input = context.run(step.invoke, input, config, **kwargs)\n",
-            "            ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 370, in invoke\n",
-            "    ret = context.run(self.func, *args, **kwargs)\n",
-            "          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\prebuilt\\chat_agent_executor.py\", line 505, in call_model\n",
-            "    response = cast(AIMessage, model_runnable.invoke(state, config))\n",
-            "                               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\base.py\", line 3047, in invoke\n",
-            "    input_ = context.run(step.invoke, input_, config)\n",
-            "             ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\base.py\", line 5431, in invoke\n",
-            "    return self.bound.invoke(\n",
-            "           ^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 372, in invoke\n",
-            "    self.generate_prompt(\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 957, in generate_prompt\n",
-            "    return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)\n",
-            "           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 776, in generate\n",
-            "    self._generate_with_cache(\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 1022, in _generate_with_cache\n",
-            "    result = self._generate(\n",
-            "             ^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_openai\\chat_models\\base.py\", line 790, in _generate\n",
-            "    response = self.client.create(**payload)\n",
-            "               ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_utils\\_utils.py\", line 287, in wrapper\n",
-            "    return func(*args, **kwargs)\n",
-            "           ^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py\", line 925, in create\n",
-            "    return self._post(\n",
-            "           ^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1239, in post\n",
-            "    return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n",
-            "                           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
-            "  File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1034, in request\n",
-            "    raise self._make_status_error_from_response(err.response) from None\n",
-            "openai.BadRequestError: Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': 'parameter.enable_thinking must be set to false for non-streaming calls', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-9f3d39f8-df01-9096-a0ce-c11c829b0b24', 'request_id': '9f3d39f8-df01-9096-a0ce-c11c829b0b24'}\n",
-            "During task with name 'agent' and id 'e3744fa2-a8df-45fa-2e57-f0e5dec4feb4'\n"
-          ]
-        }
-      ],
-      "source": [
-        "## 5.1 Agent 状态验证\n",
-        "\n",
-        "# 验证Agent是否正常创建\n",
-        "def verify_agent():\n",
-        "    \"\"\"验证Agent状态\"\"\"\n",
-        "    print(\"🔍 Agent 状态验证:\")\n",
-        "    print(f\"   - Agent 实例: {agent is not None}\")\n",
-        "    print(f\"   - Agent 类型: {type(agent)}\")\n",
-        "    print(f\"   - 创建方法: {success_method}\")\n",
-        "    print(f\"   - 可用工具: {len(tools)} 个\")\n",
-        "    print(f\"   - 工具列表: {[tool.name for tool in tools]}\")\n",
-        "    \n",
-        "    # 测试基础功能\n",
-        "    try:\n",
-        "        # 创建一个最简单的消息(包含系统消息,因为我们用的是基础创建)\n",
-        "        simple_messages = [\n",
-        "            SystemMessage(content=\"你是一个有用的助手。\"),\n",
-        "            HumanMessage(content=\"你好,请简单回复。\")\n",
-        "        ]\n",
-        "        test_config = {\"recursion_limit\": 3}\n",
-        "        \n",
-        "        # 尝试调用\n",
-        "        print(\"   - 基础调用测试: 尝试中...\")\n",
-        "        result = agent.invoke({\"messages\": simple_messages}, test_config)\n",
-        "        \n",
-        "        # 检查返回结果\n",
-        "        if result and \"messages\" in result:\n",
-        "            final_message = result[\"messages\"][-1]\n",
-        "            print(f\"   - 基础调用测试: ✅ 成功\")\n",
-        "            print(f\"   - 返回消息类型: {type(final_message).__name__}\")\n",
-        "            print(f\"   - 消息内容预览: {final_message.content[:50]}...\")\n",
-        "            return True\n",
-        "        else:\n",
-        "            print(f\"   - 基础调用测试: ❌ 返回格式异常\")\n",
-        "            return False\n",
-        "            \n",
-        "    except Exception as e:\n",
-        "        print(f\"   - 基础调用测试: ❌ 失败 ({e})\")\n",
-        "        if VERBOSE:\n",
-        "            import traceback\n",
-        "            print(\"   - 详细错误:\")\n",
-        "            traceback.print_exc()\n",
-        "        return False\n",
-        "\n",
-        "# 执行验证\n",
-        "verify_success = verify_agent()\n",
-        "print(f\"\\n{'='*50}\")\n",
-        "if verify_success:\n",
-        "    print(\"🎉 Agent 验证通过,可以开始测试!\")\n",
-        "    print(\"💡 提示:由于使用基础创建方式,每次调用都会包含完整的系统消息\")\n",
-        "else:\n",
-        "    print(\"⚠️  Agent 验证失败,请检查配置\")\n",
-        "print(f\"{'='*50}\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 6. 测试函数\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "def test_agent(question: str, max_iterations: int = None) -> Dict[str, Any]:\n",
-        "    \"\"\"\n",
-        "    测试Agent处理问题\n",
-        "    \n",
-        "    Args:\n",
-        "        question: 用户问题\n",
-        "        max_iterations: 最大迭代次数,默认使用MAX_TOOL_CALLS\n",
-        "    \n",
-        "    Returns:\n",
-        "        处理结果\n",
-        "    \"\"\"\n",
-        "    if max_iterations is None:\n",
-        "        max_iterations = MAX_TOOL_CALLS\n",
-        "    \n",
-        "    print(f\"\\n{'='*60}\")\n",
-        "    print(f\"🤔 问题: {question}\")\n",
-        "    print(f\"⚙️  最大工具调用次数: {max_iterations}\")\n",
-        "    print(f\"⚙️  Agent 创建方法: {success_method}\")\n",
-        "    print(f\"{'='*60}\\n\")\n",
-        "    \n",
-        "    try:\n",
-        "        # 构建消息 - 根据Agent创建方式决定是否包含系统消息\n",
-        "        if success_method == \"基础创建(系统消息将在调用时处理)\":\n",
-        "            # 如果Agent创建时没有系统消息,需要手动添加\n",
-        "            messages = [\n",
-        "                SystemMessage(content=SYSTEM_MESSAGE),\n",
-        "                HumanMessage(content=question)\n",
-        "            ]\n",
-        "        else:\n",
-        "            # 如果Agent创建时已包含系统消息,只需要用户消息\n",
-        "            messages = [\n",
-        "                HumanMessage(content=question)\n",
-        "            ]\n",
-        "        \n",
-        "        # 设置配置,包括递归限制\n",
-        "        config = {\n",
-        "            \"recursion_limit\": max_iterations + 5,  # 额外的缓冲\n",
-        "            \"configurable\": {\n",
-        "                \"thread_id\": f\"test_{datetime.now().strftime('%Y%m%d_%H%M%S')}\"\n",
-        "            }\n",
-        "        }\n",
-        "        \n",
-        "        if VERBOSE:\n",
-        "            print(f\"📝 发送消息数量: {len(messages)}\")\n",
-        "            print(f\"📝 消息类型: {[type(msg).__name__ for msg in messages]}\")\n",
-        "        \n",
-        "        # 调用Agent\n",
-        "        start_time = datetime.now()\n",
-        "        result = agent.invoke({\"messages\": messages}, config=config)\n",
-        "        end_time = datetime.now()\n",
-        "        \n",
-        "        # 提取最终响应\n",
-        "        final_message = result[\"messages\"][-1]\n",
-        "        \n",
-        "        print(f\"\\n{'='*60}\")\n",
-        "        print(f\"✅ 最终答案:\")\n",
-        "        print(f\"{final_message.content}\")\n",
-        "        print(f\"\\n⏱️  处理时间: {(end_time - start_time).total_seconds():.2f} 秒\")\n",
-        "        print(f\"📊 消息数量: {len(result['messages'])}\")\n",
-        "        print(f\"{'='*60}\\n\")\n",
-        "        \n",
-        "        return {\n",
-        "            \"success\": True,\n",
-        "            \"question\": question,\n",
-        "            \"answer\": final_message.content,\n",
-        "            \"messages\": result[\"messages\"],\n",
-        "            \"duration\": (end_time - start_time).total_seconds()\n",
-        "        }\n",
-        "        \n",
-        "    except Exception as e:\n",
-        "        print(f\"\\n❌ 处理失败: {e}\")\n",
-        "        if VERBOSE:\n",
-        "            import traceback\n",
-        "            print(f\"🔍 详细错误信息:\")\n",
-        "            traceback.print_exc()\n",
-        "        return {\n",
-        "            \"success\": False,\n",
-        "            \"question\": question,\n",
-        "            \"error\": str(e)\n",
-        "        }\n",
-        "\n",
-        "print(\"✅ 测试函数已定义\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 7. 执行测试\n",
-        "### 7.1 测试数据库查询"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 测试数据库查询问题\n",
-        "test_questions_db = [\n",
-        "    \"查询所有服务区的名称\",\n",
-        "    \"统计今天的营业额\",\n",
-        "    \"哪个档口的收入最高?\",\n",
-        "    \"昨天的车流量是多少?\"\n",
-        "]\n",
-        "\n",
-        "# 选择一个问题测试(可以修改索引)\n",
-        "result = test_agent(test_questions_db[0], max_iterations=8)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 7.2 测试常识问题\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 测试常识性问题\n",
-        "test_questions_common = [\n",
-        "    \"荔枝几月份上市?\",\n",
-        "    \"今天天气怎么样?\",\n",
-        "    \"Python是什么?\",\n",
-        "    \"如何做番茄炒蛋?\"\n",
-        "]\n",
-        "\n",
-        "# 选择一个问题测试\n",
-        "result = test_agent(test_questions_common[0], max_iterations=5)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 7.3 测试边界问题\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 测试边界问题(可能在数据库中,也可能需要常识)\n",
-        "test_questions_boundary = [\n",
-        "    \"服务区有卖荔枝吗?\",  # 可能需要查询商品表\n",
-        "    \"高速公路什么时候建成的?\",  # 可能没有这个数据\n",
-        "    \"如何联系客服?\",  # 系统相关但可能不在数据库\n",
-        "]\n",
-        "\n",
-        "# 选择一个问题测试\n",
-        "result = test_agent(test_questions_boundary[0], max_iterations=10)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "### 7.4 批量测试\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 批量测试多个问题\n",
-        "def batch_test(questions: List[str], max_iterations: int = None):\n",
-        "    \"\"\"批量测试问题\"\"\"\n",
-        "    results = []\n",
-        "    \n",
-        "    for i, question in enumerate(questions, 1):\n",
-        "        print(f\"\\n🔄 测试 {i}/{len(questions)}: {question}\")\n",
-        "        result = test_agent(question, max_iterations)\n",
-        "        results.append(result)\n",
-        "        \n",
-        "        # 简短总结\n",
-        "        if result[\"success\"]:\n",
-        "            print(f\"✅ 成功,耗时 {result['duration']:.2f} 秒\")\n",
-        "        else:\n",
-        "            print(f\"❌ 失败: {result.get('error', 'Unknown error')}\")\n",
-        "    \n",
-        "    # 统计\n",
-        "    success_count = sum(1 for r in results if r[\"success\"])\n",
-        "    total_time = sum(r.get(\"duration\", 0) for r in results)\n",
-        "    \n",
-        "    print(f\"\\n📊 批量测试完成:\")\n",
-        "    print(f\"   - 成功率: {success_count}/{len(questions)} ({success_count/len(questions)*100:.1f}%)\")\n",
-        "    print(f\"   - 总耗时: {total_time:.2f} 秒\")\n",
-        "    print(f\"   - 平均耗时: {total_time/len(questions):.2f} 秒/问题\")\n",
-        "    \n",
-        "    return results\n",
-        "\n",
-        "# 执行批量测试\n",
-        "all_test_questions = [\n",
-        "    \"查询所有服务区\",\n",
-        "    \"荔枝几月份上市?\",\n",
-        "    \"今天的营业额是多少?\",\n",
-        "    \"Python是什么编程语言?\"\n",
-        "]\n",
-        "\n",
-        "# batch_results = batch_test(all_test_questions, max_iterations=8)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 8. 调试工具\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "def analyze_agent_execution(result: Dict[str, Any]):\n",
-        "    \"\"\"分析Agent执行过程\"\"\"\n",
-        "    if not result.get(\"success\"):\n",
-        "        print(\"❌ 执行失败,无法分析\")\n",
-        "        return\n",
-        "    \n",
-        "    messages = result.get(\"messages\", [])\n",
-        "    \n",
-        "    print(f\"\\n📝 执行过程分析:\")\n",
-        "    print(f\"总消息数: {len(messages)}\")\n",
-        "    \n",
-        "    tool_calls = []\n",
-        "    for i, msg in enumerate(messages):\n",
-        "        if hasattr(msg, 'tool_calls') and msg.tool_calls:\n",
-        "            for tool_call in msg.tool_calls:\n",
-        "                tool_calls.append({\n",
-        "                    \"index\": i,\n",
-        "                    \"tool\": tool_call[\"name\"],\n",
-        "                    \"args\": tool_call.get(\"args\", {})\n",
-        "                })\n",
-        "    \n",
-        "    print(f\"\\n🔧 工具调用序列 (共 {len(tool_calls)} 次):\")\n",
-        "    for tc in tool_calls:\n",
-        "        print(f\"   {tc['index']}. {tc['tool']} - 参数: {tc['args']}\")\n",
-        "    \n",
-        "    # 统计工具使用\n",
-        "    from collections import Counter\n",
-        "    tool_counter = Counter(tc['tool'] for tc in tool_calls)\n",
-        "    \n",
-        "    print(f\"\\n📊 工具使用统计:\")\n",
-        "    for tool, count in tool_counter.items():\n",
-        "        print(f\"   - {tool}: {count} 次\")\n",
-        "\n",
-        "# 使用示例(需要先运行测试)\n",
-        "# analyze_agent_execution(result)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 9. 自定义测试\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 在这里输入您的自定义问题进行测试\n",
-        "custom_question = \"查询今天营业额最高的前3个档口\"\n",
-        "\n",
-        "# 可以调整最大工具调用次数\n",
-        "custom_max_iterations = 10\n",
-        "\n",
-        "# 执行测试\n",
-        "custom_result = test_agent(custom_question, max_iterations=custom_max_iterations)\n",
-        "\n",
-        "# 分析执行过程\n",
-        "if custom_result[\"success\"]:\n",
-        "    analyze_agent_execution(custom_result)\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 10. 总结\n",
-        "\n",
-        "### 实现的功能:\n",
-        "1. ✅ 使用 `create_react_agent()` 创建智能Agent\n",
-        "2. ✅ 实现四个工具:generate_sql, valid_sql, run_sql, generate_summary\n",
-        "3. ✅ Agent能够自主判断是查询数据库还是用常识回答\n",
-        "4. ✅ 支持配置最大工具调用次数,防止无限循环\n",
-        "5. ✅ 对边界问题的处理:先尝试查询,失败则用常识\n",
-        "\n",
-        "### 使用说明:\n",
-        "1. 修改 `DATABASE_SCOPE` 变量来更新数据库业务范围描述\n",
-        "2. 调整 `MAX_TOOL_CALLS` 来控制最大工具调用次数\n",
-        "3. 使用 `test_agent()` 函数测试单个问题\n",
-        "4. 使用 `batch_test()` 批量测试多个问题\n",
-        "5. 使用 `analyze_agent_execution()` 分析执行过程\n",
-        "\n",
-        "### 注意事项:\n",
-        "- 所有代码都在这个notebook中,不影响项目其他部分\n",
-        "- valid_sql 工具是新创建的,从现有代码中提取了验证逻辑\n",
-        "- Agent会根据工具返回的success和error信息智能决策下一步\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 11. 依赖包说明\n",
-        "\n",
-        "### 运行此 notebook 需要的包:\n",
-        "\n",
-        "如果运行时遇到缺包错误,请在您的 `.venv` 环境中安装以下包:\n",
-        "\n",
-        "```bash\n",
-        "# LangChain 和 LangGraph 相关\n",
-        "pip install langchain==0.3.7\n",
-        "pip install langgraph==0.2.53\n",
-        "pip install langchain-openai==0.2.9  # 如果使用OpenAI兼容API\n",
-        "\n",
-        "# 其他可能需要的依赖\n",
-        "pip install pandas  # 如果还没安装\n",
-        "pip install asyncio  # 通常已内置\n",
-        "```\n",
-        "\n",
-        "### 版本兼容性说明:\n",
-        "- 本 notebook 基于 LangChain/LangGraph v0.3.x 开发\n",
-        "- `create_react_agent` 函数在 `langgraph.prebuilt` 模块中\n",
-        "- 如果版本不匹配,可能需要调整导入路径或API用法\n",
-        "\n",
-        "### 常见问题:\n",
-        "1. **ImportError: cannot import name 'create_react_agent'**\n",
-        "   - 确保 langgraph 版本 >= 0.2.0\n",
-        "   - 检查导入路径是否正确\n",
-        "\n",
-        "2. **找不到 Vanna 实例**\n",
-        "   - 确保项目根目录的 common/vanna_instance.py 可以正常导入\n",
-        "   - 检查数据库连接配置\n",
-        "\n",
-        "3. **LLM 调用失败**\n",
-        "   - 检查 app_config.py 中的 LLM 配置\n",
-        "   - 确保 API key 和 endpoint 正确\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 12. 开始使用\n",
-        "\n",
-        "### 快速开始:\n",
-        "1. 确保已激活 `.venv` 环境\n",
-        "2. 运行 Cell 1-5 进行初始化设置\n",
-        "3. 运行 Cell 6-16 创建工具和Agent\n",
-        "4. 运行 Cell 19 定义测试函数\n",
-        "5. 然后可以测试各种问题:\n",
-        "\n",
-        "```python\n",
-        "# 测试示例\n",
-        "test_agent(\"查询今天的营业额\")\n",
-        "test_agent(\"荔枝几月份上市?\")\n",
-        "test_agent(\"哪个服务区车流量最大?\")\n",
-        "```\n",
-        "\n",
-        "祝您测试愉快!🚀\n"
-      ]
-    }
-  ],
-  "metadata": {
-    "kernelspec": {
-      "display_name": ".venv",
-      "language": "python",
-      "name": "python3"
-    },
-    "language_info": {
-      "codemirror_mode": {
-        "name": "ipython",
-        "version": 3
-      },
-      "file_extension": ".py",
-      "mimetype": "text/x-python",
-      "name": "python",
-      "nbconvert_exporter": "python",
-      "pygments_lexer": "ipython3",
-      "version": "3.12.6"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 2
-}

+ 0 - 138
test/vanna_test.ipynb

@@ -1,138 +0,0 @@
-{
-  "cells": [
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "# Vanna Chainlit ChromaDB 测试 Notebook\n",
-        "\n",
-        "这个 Notebook 用于测试项目的各种功能和 API。\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 导入必要的模块\n",
-        "import sys\n",
-        "import os\n",
-        "\n",
-        "# 添加项目根目录到 Python 路径\n",
-        "sys.path.append(os.path.join(os.path.dirname(os.getcwd())))\n",
-        "\n",
-        "print(\"项目路径已添加到 Python 路径\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 1. 测试配置加载\n",
-        "\n",
-        "测试项目的各种配置是否能正常加载。\n"
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 测试配置加载\n",
-        "try:\n",
-        "    import app_config\n",
-        "    print(\"配置加载成功!\")\n",
-        "    print(f\"LLM模型类型: {app_config.LLM_MODEL_TYPE}\")\n",
-        "    print(f\"API LLM模型: {app_config.API_LLM_MODEL}\")\n",
-        "    print(f\"向量数据库类型: {app_config.VECTOR_DB_TYPE}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"配置加载失败: {e}\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 2. 测试数据管道工具\n",
-        "\n",
-        "测试数据管道模块的配置和功能。\n",
-        "ceshi \n",
-        "ceshi "
-      ]
-    },
-    {
-      "cell_type": "code",
-      "execution_count": null,
-      "metadata": {},
-      "outputs": [],
-      "source": [
-        "# 测试数据管道模块\n",
-        "try:\n",
-        "    from data_pipeline.config import SCHEMA_TOOLS_CONFIG\n",
-        "    print(\"数据管道配置加载成功!\")\n",
-        "    print(f\"输出目录: {SCHEMA_TOOLS_CONFIG['output_directory']}\")\n",
-        "    print(f\"最大表数量: {SCHEMA_TOOLS_CONFIG['qs_generation']['max_tables']}\")\n",
-        "except Exception as e:\n",
-        "    print(f\"数据管道配置加载失败: {e}\")\n"
-      ]
-    },
-    {
-      "cell_type": "raw",
-      "metadata": {
-        "vscode": {
-          "languageId": "raw"
-        }
-      },
-      "source": [
-        "## 总结\n",
-        "\n",
-        "这个 Notebook 用于测试项目的各个组件,包括:\n",
-        "- 配置加载\n",
-        "- 数据管道工具\n",
-        "- Vanna 实例创建\n",
-        "- 工具函数\n",
-        "- 日志系统\n",
-        "\n",
-        "可以根据需要添加更多的测试用例。\n",
-        "\n",
-        "### 使用说明\n",
-        "\n",
-        "1. 确保已激活项目的虚拟环境:\n",
-        "   ```bash\n",
-        "   .\\.venv\\Scripts\\Activate.ps1\n",
-        "   ```\n",
-        "\n",
-        "2. 安装 Jupyter(如果尚未安装):\n",
-        "   ```bash\n",
-        "   pip install jupyter\n",
-        "   ```\n",
-        "\n",
-        "3. 启动 Jupyter:\n",
-        "   ```bash\n",
-        "   jupyter notebook\n",
-        "   ```\n"
-      ]
-    }
-  ],
-  "metadata": {
-    "language_info": {
-      "name": "python"
-    }
-  },
-  "nbformat": 4,
-  "nbformat_minor": 2
-}

Some files were not shown because too many files changed in this diff