|
@@ -1,4263 +0,0 @@
|
|
|
-{
|
|
|
- "cells": [
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "# Vanna Agent Test - 基于 create_react_agent 的实现\n",
|
|
|
- "\n",
|
|
|
- "## 目标\n",
|
|
|
- "使用 LangGraph 的 `create_react_agent()` 创建一个包含四个工具的智能Agent:\n",
|
|
|
- "1. generate_sql - 生成SQL\n",
|
|
|
- "2. valid_sql - 验证SQL\n",
|
|
|
- "3. run_sql - 执行SQL\n",
|
|
|
- "4. generate_summary - 生成摘要\n",
|
|
|
- "\n",
|
|
|
- "## 架构\n",
|
|
|
- "- 三节点结构:Agent节点 → Tools节点 → END节点\n",
|
|
|
- "- Agent自主决策是否需要查询数据库\n",
|
|
|
- "- 对于常识问题直接用LLM回答"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 1. 环境准备和导入"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 1,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "项目根目录: c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\n",
|
|
|
- "common目录存在: True\n",
|
|
|
- "Python路径已更新: True\n",
|
|
|
- "✅ 导入完成\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "# 添加项目根目录到Python路径\n",
|
|
|
- "import sys\n",
|
|
|
- "import os\n",
|
|
|
- "\n",
|
|
|
- "# 方法1: 基于当前notebook文件位置计算项目根目录\n",
|
|
|
- "current_dir = os.path.dirname(os.path.abspath(__file__)) if '__file__' in globals() else os.getcwd()\n",
|
|
|
- "project_root = os.path.dirname(current_dir) # test/ 的上一级就是项目根目录\n",
|
|
|
- "\n",
|
|
|
- "# 方法2: 备用方案,如果方法1失败\n",
|
|
|
- "if not os.path.exists(os.path.join(project_root, 'common')):\n",
|
|
|
- " # 尝试当前工作目录的上一级\n",
|
|
|
- " project_root = os.path.dirname(os.getcwd())\n",
|
|
|
- "\n",
|
|
|
- "# 添加到Python路径\n",
|
|
|
- "if project_root not in sys.path:\n",
|
|
|
- " sys.path.insert(0, project_root)\n",
|
|
|
- "\n",
|
|
|
- "print(f\"项目根目录: {project_root}\")\n",
|
|
|
- "print(f\"common目录存在: {os.path.exists(os.path.join(project_root, 'common'))}\")\n",
|
|
|
- "print(f\"Python路径已更新: {project_root in sys.path}\")\n",
|
|
|
- "\n",
|
|
|
- "# 基础导入\n",
|
|
|
- "from typing import Dict, Any, List, Optional\n",
|
|
|
- "import pandas as pd\n",
|
|
|
- "import re\n",
|
|
|
- "import json\n",
|
|
|
- "from datetime import datetime\n",
|
|
|
- "\n",
|
|
|
- "# LangChain/LangGraph 导入\n",
|
|
|
- "from langchain.tools import tool\n",
|
|
|
- "from langchain_core.messages import HumanMessage, AIMessage, SystemMessage\n",
|
|
|
- "from langgraph.prebuilt import create_react_agent\n",
|
|
|
- "\n",
|
|
|
- "# 项目导入\n",
|
|
|
- "from common.vanna_instance import get_vanna_instance\n",
|
|
|
- "from common.utils import get_current_llm_config\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ 导入完成\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 2,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ 配置参数已设置\n",
|
|
|
- "最大工具调用次数: 10\n",
|
|
|
- "最大返回行数: 200\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "# ========== 可配置参数 ==========\n",
|
|
|
- "\n",
|
|
|
- "# 最大工具调用次数(防止无限循环)\n",
|
|
|
- "MAX_TOOL_CALLS = 10\n",
|
|
|
- "\n",
|
|
|
- "# 最大返回行数\n",
|
|
|
- "MAX_RETURN_ROWS = 200\n",
|
|
|
- "\n",
|
|
|
- "# 是否启用详细日志\n",
|
|
|
- "VERBOSE = True\n",
|
|
|
- "\n",
|
|
|
- "# 数据库业务范围描述(请根据实际情况修改)\n",
|
|
|
- "DATABASE_SCOPE = \"\"\"\n",
|
|
|
- "=== 数据库业务范围 ===\n",
|
|
|
- "本系统是高速公路服务区商业管理系统,包含以下业务数据:\n",
|
|
|
- "\n",
|
|
|
- "核心业务实体:\n",
|
|
|
- "- 服务区(bss_service_area):服务区基础信息、位置、状态\n",
|
|
|
- "- 档口/商铺(bss_business_day_data):档口信息、品类、营业数据\n",
|
|
|
- "- 车流量(bss_car_day_count):按车型统计的日流量数据\n",
|
|
|
- "- 公司信息(bss_company):服务区管理公司\n",
|
|
|
- "\n",
|
|
|
- "关键业务指标:\n",
|
|
|
- "- 支付方式:微信支付、支付宝支付、现金支付等\n",
|
|
|
- "- 营业数据:支付金额、订单数量、营业额、收入统计\n",
|
|
|
- "- 车流统计:按车型的流量分析\n",
|
|
|
- "- 经营分析:餐饮、小吃、便利店等品类收入\n",
|
|
|
- "\n",
|
|
|
- "时间范围:\n",
|
|
|
- "- 数据更新到最近的营业日\n",
|
|
|
- "- 历史数据可追溯到系统上线时间\n",
|
|
|
- "\"\"\"\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ 配置参数已设置\")\n",
|
|
|
- "print(f\"最大工具调用次数: {MAX_TOOL_CALLS}\")\n",
|
|
|
- "print(f\"最大返回行数: {MAX_RETURN_ROWS}\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 3. 获取LLM实例\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 3,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔧 检测到模型: qwen-plus\n",
|
|
|
- "🔧 为模型 qwen-plus 设置 enable_thinking=False\n",
|
|
|
- "✅ 使用OpenAI兼容API(方法1:model_kwargs)\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "def get_llm():\n",
|
|
|
- " \"\"\"获取兼容的LLM实例\"\"\"\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试使用OpenAI兼容的API\n",
|
|
|
- " from langchain_openai import ChatOpenAI\n",
|
|
|
- " from common.utils import get_current_llm_config\n",
|
|
|
- " \n",
|
|
|
- " llm_config = get_current_llm_config()\n",
|
|
|
- " \n",
|
|
|
- " if llm_config.get(\"base_url\") and llm_config.get(\"api_key\"):\n",
|
|
|
- " # 构建参数,确保thinking功能正确设置\n",
|
|
|
- " model_name = llm_config.get(\"model\", \"\").lower()\n",
|
|
|
- " print(f\"🔧 检测到模型: {model_name}\")\n",
|
|
|
- " \n",
|
|
|
- " # 方法1:尝试使用model_kwargs传递参数\n",
|
|
|
- " model_kwargs = {}\n",
|
|
|
- " if \"deepseek\" in model_name or \"qianwen\" in model_name or \"qwen\" in model_name:\n",
|
|
|
- " model_kwargs[\"enable_thinking\"] = False\n",
|
|
|
- " print(f\"🔧 为模型 {model_name} 设置 enable_thinking=False\")\n",
|
|
|
- " \n",
|
|
|
- " llm = ChatOpenAI(\n",
|
|
|
- " base_url=llm_config.get(\"base_url\"),\n",
|
|
|
- " api_key=llm_config.get(\"api_key\"),\n",
|
|
|
- " model=llm_config.get(\"model\"),\n",
|
|
|
- " temperature=llm_config.get(\"temperature\", 0.7),\n",
|
|
|
- " model_kwargs=model_kwargs\n",
|
|
|
- " )\n",
|
|
|
- " print(\"✅ 使用OpenAI兼容API(方法1:model_kwargs)\")\n",
|
|
|
- " return llm\n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " print(f\"⚠️ OpenAI API方法1失败: {e}\")\n",
|
|
|
- " \n",
|
|
|
- " # 方法2:尝试使用extra_body\n",
|
|
|
- " try:\n",
|
|
|
- " from langchain_openai import ChatOpenAI\n",
|
|
|
- " from common.utils import get_current_llm_config\n",
|
|
|
- " \n",
|
|
|
- " llm_config = get_current_llm_config()\n",
|
|
|
- " \n",
|
|
|
- " if llm_config.get(\"base_url\") and llm_config.get(\"api_key\"):\n",
|
|
|
- " model_name = llm_config.get(\"model\", \"\").lower()\n",
|
|
|
- " \n",
|
|
|
- " llm = ChatOpenAI(\n",
|
|
|
- " base_url=llm_config.get(\"base_url\"),\n",
|
|
|
- " api_key=llm_config.get(\"api_key\"),\n",
|
|
|
- " model=llm_config.get(\"model\"),\n",
|
|
|
- " temperature=llm_config.get(\"temperature\", 0.7),\n",
|
|
|
- " extra_body={\"enable_thinking\": False}\n",
|
|
|
- " )\n",
|
|
|
- " print(\"✅ 使用OpenAI兼容API(方法2:extra_body)\")\n",
|
|
|
- " return llm\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"⚠️ OpenAI API方法2失败: {e2}\")\n",
|
|
|
- " \n",
|
|
|
- " # 回退方案:创建一个简单的包装器\n",
|
|
|
- " from langchain_core.language_models import BaseChatModel\n",
|
|
|
- " from langchain_core.messages import BaseMessage, AIMessage\n",
|
|
|
- " from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- " \n",
|
|
|
- " class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking\n",
|
|
|
- " try:\n",
|
|
|
- " # 直接调用项目中的LLM实例,它应该已经正确配置了thinking参数\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 如果不支持enable_thinking参数,使用默认调用\n",
|
|
|
- " try:\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- " \n",
|
|
|
- " print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- " return VannaLLMWrapper()\n",
|
|
|
- "\n",
|
|
|
- "# 获取LLM实例\n",
|
|
|
- "llm = get_llm()\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 4,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:40:13,350 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:40:23,136 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:40:23,137 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:40:23,138 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:40:23,139 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:40:23,139 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:40:23,140 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:40:23,142 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:40:23,142 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:40:23,143 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:40:23,144 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:40:23,393 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:40:23,394 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:40:23,395 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:40:23,396 - vanna.BaseLLMChat - INFO - model: qwen-plus\n",
|
|
|
- "2025-07-08 09:40:23,397 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:40:23,398 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:40:23,398 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO - stream: False\n",
|
|
|
- "2025-07-08 09:40:23,399 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:40:23,400 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:40:23,400 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000024E2E5135C0>\n",
|
|
|
- "2025-07-08 09:40:23,401 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:40:23,402 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:40:24,662 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:40:24,663 - app.VannaSingleton - INFO - Vanna 实例创建成功\n",
|
|
|
- "2025-07-08 09:40:24,668 - vanna.BaseLLMChat - INFO - \n",
|
|
|
- "Using model qwen-plus for 18.5 tokens (approx)\n",
|
|
|
- "2025-07-08 09:40:24,668 - vanna.BaseLLMChat - INFO - Enable thinking: False, Stream mode: False\n",
|
|
|
- "2025-07-08 09:40:24,669 - vanna.BaseLLMChat - INFO - 使用非流式处理模式\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ 使用Vanna LLM包装器\n",
|
|
|
- "\n",
|
|
|
- "🧪 测试LLM基础功能...\n",
|
|
|
- "🔧 成功禁用thinking和stream\n",
|
|
|
- "✅ LLM测试成功: 测试成功\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " # 使用类配置允许额外字段\n",
|
|
|
- " model_config = {\"extra\": \"allow\"}\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self, **kwargs):\n",
|
|
|
- " super().__init__(**kwargs)\n",
|
|
|
- " # 在初始化后设置vn实例\n",
|
|
|
- " object.__setattr__(self, 'vn', get_vanna_instance())\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 5,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 55\u001b[39m\n\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[5]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
|
|
|
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
|
|
|
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
|
|
|
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
|
|
|
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
|
|
|
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
|
|
|
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
|
|
|
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
|
|
|
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
|
|
|
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
|
|
|
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
|
|
|
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
|
|
|
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
|
|
|
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
|
|
|
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
|
|
|
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
|
|
|
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
|
|
|
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "ename": "ValueError",
|
|
|
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
|
|
|
- "output_type": "error",
|
|
|
- "traceback": [
|
|
|
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
|
|
|
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
|
|
|
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
|
|
|
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
|
|
|
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
|
|
|
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "2025-07-08 09:28:50,209 - app.VannaSingleton - INFO - 创建 Vanna 实例...\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "🔄 重新创建LLM实例...\n",
- "⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "2025-07-08 09:29:00,759 - app.ConfigUtils - INFO - === 当前模型配置 ===\n",
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM提供商: api\n",
- "2025-07-08 09:29:00,762 - app.ConfigUtils - INFO - LLM模型: qianwen\n",
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding提供商: api\n",
- "2025-07-08 09:29:00,763 - app.ConfigUtils - INFO - Embedding模型: text-embedding-v4\n",
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - 向量数据库: pgvector\n",
- "2025-07-08 09:29:00,764 - app.ConfigUtils - INFO - ==================\n",
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 创建QIANWEN+PGVECTOR实例\n",
- "2025-07-08 09:29:00,765 - vanna.VannaFactory - INFO - 已配置使用PgVector,连接字符串: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
- "2025-07-08 09:29:00,766 - vanna.VannaFactory - INFO - 已配置使用API嵌入模型: text-embedding-v4\n",
- "2025-07-08 09:29:01,087 - vanna.BaseLLMChat - INFO - 传入的 config 参数如下:\n",
- "2025-07-08 09:29:01,088 - vanna.BaseLLMChat - INFO - api_key: sk-db68e37f00974031935395315bfe07f0\n",
- "2025-07-08 09:29:01,089 - vanna.BaseLLMChat - INFO - base_url: https://dashscope.aliyuncs.com/compatible-mode/v1\n",
- "2025-07-08 09:29:01,090 - vanna.BaseLLMChat - INFO - model: qwen3-235b-a22b\n",
- "2025-07-08 09:29:01,091 - vanna.BaseLLMChat - INFO - allow_llm_to_see_data: True\n",
- "2025-07-08 09:29:01,092 - vanna.BaseLLMChat - INFO - temperature: 0.6\n",
- "2025-07-08 09:29:01,093 - vanna.BaseLLMChat - INFO - n_results: 6\n",
- "2025-07-08 09:29:01,094 - vanna.BaseLLMChat - INFO - language: Chinese\n",
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - stream: True\n",
- "2025-07-08 09:29:01,095 - vanna.BaseLLMChat - INFO - enable_thinking: False\n",
- "2025-07-08 09:29:01,096 - vanna.BaseLLMChat - INFO - connection_string: postgresql://postgres:postgres@192.168.67.1:5432/highway_pgvector_db\n",
- "2025-07-08 09:29:01,097 - vanna.BaseLLMChat - INFO - embedding_function: <core.embedding_function.EmbeddingFunction object at 0x0000018A8D2376B0>\n",
- "2025-07-08 09:29:01,098 - vanna.BaseLLMChat - INFO - temperature is changed to: 0.6\n",
- "2025-07-08 09:29:01,099 - vanna.BaseLLMChat - INFO - QianWenChat init\n",
- "2025-07-08 09:29:02,512 - vanna.VannaFactory - INFO - 已连接到业务数据库: 192.168.67.1:6432/highway_db\n",
- "2025-07-08 09:29:02,513 - app.VannaSingleton - INFO - Vanna 实例创建成功\n"
- ]
- },
- {
- "ename": "ValueError",
- "evalue": "\"VannaLLMWrapper\" object has no field \"vn\"",
- "output_type": "error",
- "traceback": [
- "\u001b[31m---------------------------------------------------------------------------\u001b[39m\n",
- "\u001b[31mValueError\u001b[39m Traceback (most recent call last)\n",
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 55\u001b[39m\n",
- "\u001b[32m 52\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33mvanna_wrapper\u001b[39m\u001b[33m\"\u001b[39m\n",
- "\u001b[32m 54\u001b[39m \u001b[38;5;66;03m# 创建LLM实例\u001b[39;00m\n",
- "\u001b[32m---> \u001b[39m\u001b[32m55\u001b[39m llm = \u001b[43mVannaLLMWrapper\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n",
- "\u001b[32m 56\u001b[39m \u001b[38;5;28mprint\u001b[39m(\u001b[33m\"\u001b[39m\u001b[33m✅ 使用Vanna LLM包装器\u001b[39m\u001b[33m\"\u001b[39m)\n",
- "\u001b[32m 58\u001b[39m \u001b[38;5;66;03m# 测试LLM基础功能\u001b[39;00m\n",
- "\n",
|
|
|
- "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[26]\u001b[39m\u001b[32m, line 17\u001b[39m, in \u001b[36mVannaLLMWrapper.__init__\u001b[39m\u001b[34m(self)\u001b[39m\n",
|
|
|
- "\u001b[32m 15\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m__init__\u001b[39m(\u001b[38;5;28mself\u001b[39m):\n",
|
|
|
- "\u001b[32m 16\u001b[39m \u001b[38;5;28msuper\u001b[39m().\u001b[34m__init__\u001b[39m()\n",
|
|
|
- "\u001b[32m---> \u001b[39m\u001b[32m17\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mvn\u001b[49m = get_vanna_instance()\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:997\u001b[39m, in \u001b[36mBaseModel.__setattr__\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 995\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value)\n",
|
|
|
- "\u001b[32m 996\u001b[39m \u001b[38;5;66;03m# if None is returned from _setattr_handler, the attribute was set directly\u001b[39;00m\n",
|
|
|
- "\u001b[32m--> \u001b[39m\u001b[32m997\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m (setattr_handler := \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_setattr_handler\u001b[49m\u001b[43m(\u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mvalue\u001b[49m\u001b[43m)\u001b[49m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 998\u001b[39m setattr_handler(\u001b[38;5;28mself\u001b[39m, name, value) \u001b[38;5;66;03m# call here to not memo on possibly unknown fields\u001b[39;00m\n",
|
|
|
- "\u001b[32m 999\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_setattr_handlers__[name] = setattr_handler\n",
|
|
|
- "\n",
|
|
|
- "\u001b[36mFile \u001b[39m\u001b[32mc:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\pydantic\\main.py:1044\u001b[39m, in \u001b[36mBaseModel._setattr_handler\u001b[39m\u001b[34m(self, name, value)\u001b[39m\n",
|
|
|
- "\u001b[32m 1041\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m name \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.__pydantic_fields__:\n",
|
|
|
- "\u001b[32m 1042\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mcls\u001b[39m.model_config.get(\u001b[33m'\u001b[39m\u001b[33mextra\u001b[39m\u001b[33m'\u001b[39m) != \u001b[33m'\u001b[39m\u001b[33mallow\u001b[39m\u001b[33m'\u001b[39m:\n",
|
|
|
- "\u001b[32m 1043\u001b[39m \u001b[38;5;66;03m# TODO - matching error\u001b[39;00m\n",
|
|
|
- "\u001b[32m-> \u001b[39m\u001b[32m1044\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\u001b[33mf\u001b[39m\u001b[33m'\u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[38;5;28mcls\u001b[39m.\u001b[34m__name__\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m object has no field \u001b[39m\u001b[33m\"\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\u001b[33m'\u001b[39m)\n",
|
|
|
- "\u001b[32m 1045\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m attr \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
|
|
|
- "\u001b[32m 1046\u001b[39m \u001b[38;5;66;03m# attribute does not exist, so put it in extra\u001b[39;00m\n",
|
|
|
- "\u001b[32m 1047\u001b[39m \u001b[38;5;28mself\u001b[39m.__pydantic_extra__[name] = value\n",
|
|
|
- "\n",
|
|
|
- "\u001b[31mValueError\u001b[39m: \"VannaLLMWrapper\" object has no field \"vn\""
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 由于ChatOpenAI不支持enable_thinking参数,直接使用Vanna包装器\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "print(\"⚠️ 检测到thinking参数问题,直接使用Vanna包装器...\")\n",
|
|
|
- "\n",
|
|
|
- "# 直接创建Vanna包装器\n",
|
|
|
- "from langchain_core.language_models import BaseChatModel\n",
|
|
|
- "from langchain_core.messages import BaseMessage, AIMessage, SystemMessage, HumanMessage\n",
|
|
|
- "from langchain_core.outputs import ChatResult, ChatGeneration\n",
|
|
|
- "\n",
|
|
|
- "class VannaLLMWrapper(BaseChatModel):\n",
|
|
|
- " \"\"\"Vanna LLM的LangChain包装器\"\"\"\n",
|
|
|
- " \n",
|
|
|
- " def __init__(self):\n",
|
|
|
- " super().__init__()\n",
|
|
|
- " self.vn = get_vanna_instance()\n",
|
|
|
- " \n",
|
|
|
- " def _generate(self, messages: List[BaseMessage], **kwargs) -> ChatResult:\n",
|
|
|
- " # 构建提示词\n",
|
|
|
- " prompt = \"\"\n",
|
|
|
- " for msg in messages:\n",
|
|
|
- " if isinstance(msg, SystemMessage):\n",
|
|
|
- " prompt = msg.content + \"\\n\\n\"\n",
|
|
|
- " elif isinstance(msg, HumanMessage):\n",
|
|
|
- " prompt += f\"用户: {msg.content}\\n\"\n",
|
|
|
- " elif isinstance(msg, AIMessage):\n",
|
|
|
- " prompt += f\"助手: {msg.content}\\n\"\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna,确保禁用thinking和stream\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试禁用thinking和stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, enable_thinking=False, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用thinking和stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " try:\n",
|
|
|
- " # 尝试只禁用stream\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt, stream=False)\n",
|
|
|
- " print(\"🔧 成功禁用stream\")\n",
|
|
|
- " except TypeError:\n",
|
|
|
- " # 最后的备用方案\n",
|
|
|
- " response = self.vn.chat_with_llm(question=prompt)\n",
|
|
|
- " print(\"🔧 使用默认调用\")\n",
|
|
|
- " \n",
|
|
|
- " # 返回结果\n",
|
|
|
- " message = AIMessage(content=response)\n",
|
|
|
- " generation = ChatGeneration(message=message)\n",
|
|
|
- " return ChatResult(generations=[generation])\n",
|
|
|
- " \n",
|
|
|
- " @property\n",
|
|
|
- " def _llm_type(self) -> str:\n",
|
|
|
- " return \"vanna_wrapper\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建LLM实例\n",
|
|
|
- "llm = VannaLLMWrapper()\n",
|
|
|
- "print(\"✅ 使用Vanna LLM包装器\")\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"检查Vanna实例是否正常工作...\")\n",
|
|
|
- " \n",
|
|
|
- " # 直接测试Vanna实例\n",
|
|
|
- " try:\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " direct_response = vn.chat_with_llm(question=\"测试\", stream=False)\n",
|
|
|
- " print(f\"✅ Vanna直接调用成功: {direct_response}\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " print(f\"❌ Vanna直接调用也失败: {e2}\")\n",
|
|
|
- " print(\"请检查您的LLM配置和网络连接\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔄 重新创建LLM实例...\n",
|
|
|
- "🔧 检测到模型: qwen3-235b-a22b\n",
|
|
|
- "🔧 为模型 qwen3-235b-a22b 设置 enable_thinking=False\n",
|
|
|
- "✅ 使用OpenAI兼容API(方法1:model_kwargs)\n",
|
|
|
- "\n",
|
|
|
- "🧪 测试LLM基础功能...\n",
|
|
|
- "❌ LLM测试失败: Completions.create() got an unexpected keyword argument 'enable_thinking'\n",
|
|
|
- "如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 3.1 重新创建LLM实例(解决thinking参数问题)\n",
|
|
|
- "\n",
|
|
|
- "# 重新获取LLM实例\n",
|
|
|
- "print(\"🔄 重新创建LLM实例...\")\n",
|
|
|
- "llm = get_llm()\n",
|
|
|
- "\n",
|
|
|
- "# 测试LLM基础功能\n",
|
|
|
- "print(\"\\n🧪 测试LLM基础功能...\")\n",
|
|
|
- "try:\n",
|
|
|
- " from langchain_core.messages import HumanMessage\n",
|
|
|
- " test_response = llm.invoke([HumanMessage(content=\"请回复'测试成功'\")])\n",
|
|
|
- " print(f\"✅ LLM测试成功: {test_response.content}\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " print(f\"❌ LLM测试失败: {e}\")\n",
|
|
|
- " print(\"如果仍然有thinking错误,请检查您的app_config.py中的LLM配置\")\n"
|
|
|
- ]
|
|
|
- },
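- {
- "cell_type": "raw",
- "metadata": {
- "vscode": {
- "languageId": "raw"
- }
- },
- "source": [
- "### 3.2 备选思路:通过 extra_body 关闭 thinking(示意)\n",
- "\n",
- "如果希望继续使用 ChatOpenAI 而不是 Vanna 包装器,一个可能的思路是把 enable_thinking 放进请求体,而不是作为 Completions.create() 的关键字参数。下面只是一个示意写法,前提假设是所用 langchain-openai 版本支持 extra_body 参数、且 DashScope 兼容模式接受该字段;占位的 api_key 需替换为实际配置:\n",
- "\n",
- "```python\n",
- "from langchain_openai import ChatOpenAI\n",
- "\n",
- "# extra_body 中的键值会并入请求体,从而避免 enable_thinking 作为关键字参数被拒绝\n",
- "llm = ChatOpenAI(\n",
- "    model=\"qwen3-235b-a22b\",\n",
- "    api_key=\"<your-dashscope-api-key>\",\n",
- "    base_url=\"https://dashscope.aliyuncs.com/compatible-mode/v1\",\n",
- "    temperature=0.6,\n",
- "    extra_body={\"enable_thinking\": False},\n",
- ")\n",
- "```\n"
- ]
- },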
|
|
|
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 4. 定义工具函数\n",
|
|
|
- "### 4.1 generate_sql 工具"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 11,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ generate_sql 工具已定义\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "@tool\n",
|
|
|
- "def generate_sql(question: str) -> Dict[str, Any]:\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " 将自然语言问题转换为SQL查询。\n",
|
|
|
- " \n",
|
|
|
- " Args:\n",
|
|
|
- " question: 需要转换为SQL的自然语言问题\n",
|
|
|
- " \n",
|
|
|
- " Returns:\n",
|
|
|
- " 包含SQL生成结果的字典\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " try:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"🔧 [generate_sql] 输入问题: {question}\")\n",
|
|
|
- " \n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " sql = vn.generate_sql(question=question, allow_llm_to_see_data=True)\n",
|
|
|
- " \n",
|
|
|
- " if sql is None:\n",
|
|
|
- " # 检查是否有解释性文本\n",
|
|
|
- " explanation = getattr(vn, 'last_llm_explanation', None)\n",
|
|
|
- " if explanation:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"sql\": None,\n",
|
|
|
- " \"error\": explanation,\n",
|
|
|
- " \"error_type\": \"no_relevant_data\"\n",
|
|
|
- " }\n",
|
|
|
- " else:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"sql\": None,\n",
|
|
|
- " \"error\": \"无法生成SQL查询,可能是问题描述不够明确或数据表结构不匹配\",\n",
|
|
|
- " \"error_type\": \"generation_failed\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"✅ [generate_sql] 生成的SQL: {sql}\")\n",
|
|
|
- " \n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"sql\": sql,\n",
|
|
|
- " \"error\": None\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"sql\": None,\n",
|
|
|
- " \"error\": f\"SQL生成异常: {str(e)}\",\n",
|
|
|
- " \"error_type\": \"exception\"\n",
|
|
|
- " }\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ generate_sql 工具已定义\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 4.2 valid_sql 工具"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 12,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ valid_sql 工具已定义\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "@tool\n",
|
|
|
- "def valid_sql(sql: str) -> Dict[str, Any]:\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " 验证SQL语句的正确性。\n",
|
|
|
- " \n",
|
|
|
- " Args:\n",
|
|
|
- " sql: 要验证的SQL语句\n",
|
|
|
- " \n",
|
|
|
- " Returns:\n",
|
|
|
- " 包含验证结果的字典\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " try:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"🔧 [valid_sql] 验证SQL: {sql[:100]}...\")\n",
|
|
|
- " \n",
|
|
|
- " # 1. 基础格式检查\n",
|
|
|
- " if not sql or not sql.strip():\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"valid\": False,\n",
|
|
|
- " \"error\": \"SQL语句为空\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " # 2. 禁止词检查\n",
|
|
|
- " forbidden_operations = ['UPDATE', 'DELETE', 'DROP', 'ALTER', 'INSERT']\n",
|
|
|
- " sql_upper = sql.upper().strip()\n",
|
|
|
- " \n",
|
|
|
- " for operation in forbidden_operations:\n",
|
|
|
- " if sql_upper.startswith(operation):\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"valid\": False,\n",
|
|
|
- " \"error\": f\"不允许的操作: {operation}。本系统只支持查询操作(SELECT)。\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " # 3. 语法验证(使用EXPLAIN)\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " explain_sql = f\"EXPLAIN {sql}\"\n",
|
|
|
- " \n",
|
|
|
- " try:\n",
|
|
|
- " result = vn.run_sql(explain_sql)\n",
|
|
|
- " if result is not None:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(\"✅ [valid_sql] SQL验证通过\")\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"valid\": True,\n",
|
|
|
- " \"error\": None\n",
|
|
|
- " }\n",
|
|
|
- " else:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"valid\": False,\n",
|
|
|
- " \"error\": \"SQL语法验证失败\"\n",
|
|
|
- " }\n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " error_msg = str(e)\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"❌ [valid_sql] 验证失败: {error_msg}\")\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"valid\": False,\n",
|
|
|
- " \"error\": f\"SQL语法错误: {error_msg}\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"valid\": False,\n",
|
|
|
- " \"error\": f\"验证过程异常: {str(e)}\"\n",
|
|
|
- " }\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ valid_sql 工具已定义\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 4.3 run_sql 工具\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 13,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ run_sql 工具已定义\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "@tool\n",
|
|
|
- "def run_sql(sql: str) -> Dict[str, Any]:\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " 执行SQL查询并返回结果。\n",
|
|
|
- " \n",
|
|
|
- " Args:\n",
|
|
|
- " sql: 要执行的SQL查询语句\n",
|
|
|
- " \n",
|
|
|
- " Returns:\n",
|
|
|
- " 包含查询结果的字典\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " try:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"🔧 [run_sql] 执行SQL: {sql[:100]}...\")\n",
|
|
|
- " \n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " df = vn.run_sql(sql)\n",
|
|
|
- " \n",
|
|
|
- " if df is None:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"data\": None,\n",
|
|
|
- " \"error\": \"SQL执行返回空结果\",\n",
|
|
|
- " \"row_count\": 0\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " if not isinstance(df, pd.DataFrame):\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"data\": None,\n",
|
|
|
- " \"error\": f\"SQL执行返回非DataFrame类型: {type(df)}\",\n",
|
|
|
- " \"row_count\": 0\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " if df.empty:\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"data\": [],\n",
|
|
|
- " \"columns\": list(df.columns),\n",
|
|
|
- " \"row_count\": 0,\n",
|
|
|
- " \"message\": \"查询执行成功,但没有找到符合条件的数据\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " # 处理数据结果\n",
|
|
|
- " total_rows = len(df)\n",
|
|
|
- " limited_df = df.head(MAX_RETURN_ROWS)\n",
|
|
|
- " \n",
|
|
|
- " # 转换为字典格式\n",
|
|
|
- " rows = limited_df.to_dict(orient=\"records\")\n",
|
|
|
- " columns = list(df.columns)\n",
|
|
|
- " \n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"✅ [run_sql] 查询成功,返回 {total_rows} 行数据\")\n",
|
|
|
- " \n",
|
|
|
- " result = {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"data\": rows,\n",
|
|
|
- " \"columns\": columns,\n",
|
|
|
- " \"row_count\": len(rows),\n",
|
|
|
- " \"total_row_count\": total_rows,\n",
|
|
|
- " \"is_limited\": total_rows > MAX_RETURN_ROWS\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " if total_rows > MAX_RETURN_ROWS:\n",
|
|
|
- " result[\"message\"] = f\"共 {total_rows} 行数据,已限制显示前 {MAX_RETURN_ROWS} 行\"\n",
|
|
|
- " \n",
|
|
|
- " return result\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " error_msg = str(e)\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"❌ [run_sql] 执行失败: {error_msg}\")\n",
|
|
|
- " \n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"data\": None,\n",
|
|
|
- " \"error\": f\"SQL执行失败: {error_msg}\",\n",
|
|
|
- " \"row_count\": 0\n",
|
|
|
- " }\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ run_sql 工具已定义\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 4.4 generate_summary 工具\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 14,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ generate_summary 工具已定义\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "@tool\n",
|
|
|
- "def generate_summary(question: str, query_result: str) -> Dict[str, Any]:\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " 为查询结果生成自然语言摘要。\n",
|
|
|
- " \n",
|
|
|
- " Args:\n",
|
|
|
- " question: 原始问题\n",
|
|
|
- " query_result: 查询结果的JSON字符串\n",
|
|
|
- " \n",
|
|
|
- " Returns:\n",
|
|
|
- " 包含摘要结果的字典\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " try:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"🔧 [generate_summary] 为问题生成摘要: {question}\")\n",
|
|
|
- " \n",
|
|
|
- " # 解析查询结果\n",
|
|
|
- " try:\n",
|
|
|
- " result_data = json.loads(query_result) if isinstance(query_result, str) else query_result\n",
|
|
|
- " except:\n",
|
|
|
- " result_data = {\"error\": \"无法解析查询结果\"}\n",
|
|
|
- " \n",
|
|
|
- " # 检查是否有数据\n",
|
|
|
- " if not result_data.get(\"success\") or not result_data.get(\"data\"):\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"summary\": \"查询执行完成,但没有找到符合条件的数据。\"\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " # 重构DataFrame用于摘要生成\n",
|
|
|
- " rows = result_data.get(\"data\", [])\n",
|
|
|
- " columns = result_data.get(\"columns\", [])\n",
|
|
|
- " \n",
|
|
|
- " if rows and columns:\n",
|
|
|
- " df = pd.DataFrame(rows, columns=columns)\n",
|
|
|
- " \n",
|
|
|
- " # 调用Vanna生成摘要\n",
|
|
|
- " vn = get_vanna_instance()\n",
|
|
|
- " summary = vn.generate_summary(question=question, df=df)\n",
|
|
|
- " \n",
|
|
|
- " if summary:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"✅ [generate_summary] 摘要生成成功\")\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"summary\": summary\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " # 生成默认摘要\n",
|
|
|
- " row_count = result_data.get(\"row_count\", 0)\n",
|
|
|
- " summary = f\"根据您的问题『{question}』,查询返回了 {row_count} 条记录。\"\n",
|
|
|
- " \n",
|
|
|
- " if columns:\n",
|
|
|
- " summary += f\"数据包含以下字段:{', '.join(columns[:5])}\" \n",
|
|
|
- " if len(columns) > 5:\n",
|
|
|
- " summary += f\"等{len(columns)}个字段。\"\n",
|
|
|
- " else:\n",
|
|
|
- " summary += \"。\"\n",
|
|
|
- " \n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"summary\": summary\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"❌ [generate_summary] 生成摘要失败: {str(e)}\")\n",
|
|
|
- " \n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"summary\": f\"查询执行完成,共返回数据。\"\n",
|
|
|
- " }\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ generate_summary 工具已定义\")\n"
|
|
|
- ]
|
|
|
- },
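- {
- "cell_type": "raw",
- "metadata": {
- "vscode": {
- "languageId": "raw"
- }
- },
- "source": [
- "### 4.5 工具的单独调用示例(可选)\n",
- "\n",
- "下面是一个示意片段:经 @tool 装饰的工具可以用 .invoke() 传入参数字典单独调用,便于在接入 Agent 之前逐个验证工具;示例中的问题文本仅作演示:\n",
- "\n",
- "```python\n",
- "# 先生成SQL,再验证,最后执行,逐步检查每个工具的返回结构\n",
- "sql_result = generate_sql.invoke({\"question\": \"查询所有服务区的名称\"})\n",
- "print(sql_result)\n",
- "\n",
- "if sql_result[\"success\"]:\n",
- "    check = valid_sql.invoke({\"sql\": sql_result[\"sql\"]})\n",
- "    print(check)\n",
- "\n",
- "    if check.get(\"valid\"):\n",
- "        data = run_sql.invoke({\"sql\": sql_result[\"sql\"]})\n",
- "        print(data.get(\"row_count\"), \"行\")\n",
- "```\n"
- ]
- },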
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 5. 创建 ReAct Agent\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 15,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "✅ 工具列表已准备\n",
|
|
|
- "可用工具: ['generate_sql', 'valid_sql', 'run_sql', 'generate_summary']\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "# 系统提示词\n",
|
|
|
- "SYSTEM_MESSAGE = f\"\"\"\n",
|
|
|
- "你是一个智能数据查询助手,可以帮助用户查询数据库信息,也可以回答一般性问题。\n",
|
|
|
- "\n",
|
|
|
- "{DATABASE_SCOPE}\n",
|
|
|
- "\n",
|
|
|
- "=== 工作流程 ===\n",
|
|
|
- "1. 判断问题类型:\n",
|
|
|
- " - 如果问题涉及上述业务数据,使用工具查询数据库\n",
|
|
|
- " - 如果是常识性问题(如\"荔枝几月上市\"),直接用你的知识回答\n",
|
|
|
- "\n",
|
|
|
- "2. 数据库查询流程:\n",
|
|
|
- " a) 使用 generate_sql 生成SQL\n",
|
|
|
- " b) 使用 valid_sql 验证SQL\n",
|
|
|
- " c) 如果验证通过,使用 run_sql 执行SQL\n",
|
|
|
- " d) 使用 generate_summary 生成结果摘要\n",
|
|
|
- "\n",
|
|
|
- "3. 错误处理:\n",
|
|
|
- " - 如果无法生成SQL,说明数据库中可能没有相关数据\n",
|
|
|
- " - 对于边界问题,可以先尝试查询,如果失败则用常识回答\n",
|
|
|
- "\n",
|
|
|
- "4. 注意事项:\n",
|
|
|
- " - 每个工具调用都要检查返回的 success 字段\n",
|
|
|
- " - 如果工具调用失败,根据 error 信息决定下一步\n",
|
|
|
- " - 避免重复调用相同的工具超过2次\n",
|
|
|
- "\n",
|
|
|
- "请根据用户问题,智能选择合适的处理方式。\n",
|
|
|
- "\"\"\"\n",
|
|
|
- "\n",
|
|
|
- "# 创建工具列表\n",
|
|
|
- "tools = [generate_sql, valid_sql, run_sql, generate_summary]\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ 工具列表已准备\")\n",
|
|
|
- "print(f\"可用工具: {[tool.name for tool in tools]}\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "## 5.0 重新创建Agent(使用修复后的LLM)\n",
|
|
|
- "\n",
|
|
|
- "# 重新创建Agent\n",
|
|
|
- "print(\"🔄 重新创建Agent...\")\n",
|
|
|
- "\n",
|
|
|
- "agent = None\n",
|
|
|
- "success_method = None\n",
|
|
|
- "\n",
|
|
|
- "# 基础创建(系统消息将在调用时处理)\n",
|
|
|
- "try:\n",
|
|
|
- " agent = create_react_agent(\n",
|
|
|
- " llm,\n",
|
|
|
- " tools=tools\n",
|
|
|
- " )\n",
|
|
|
- " success_method = \"基础创建(系统消息将在调用时处理)\"\n",
|
|
|
- " print(\"✅ ReAct Agent 重新创建成功\")\n",
|
|
|
- " print(\"⚠️ 注意:系统消息将在每次调用时手动添加\")\n",
|
|
|
- "except Exception as e3:\n",
|
|
|
- " print(f\"❌ Agent创建失败: {e3}\")\n",
|
|
|
- " raise Exception(\"无法创建 ReAct Agent\")\n",
|
|
|
- "\n",
|
|
|
- "print(f\"🎯 成功方法: {success_method}\")\n",
|
|
|
- "print(f\"📋 Agent 类型: {type(agent)}\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 17,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "方法1失败: create_react_agent() got an unexpected keyword argument 'system_message'\n",
|
|
|
- "方法2失败: create_react_agent() got an unexpected keyword argument 'state_modifier'\n",
|
|
|
- "✅ ReAct Agent 创建成功(方法3:基础创建)\n",
|
|
|
- "⚠️ 注意:系统消息将在每次调用时手动添加\n",
|
|
|
- "🎯 成功方法: 基础创建(系统消息将在调用时处理)\n",
|
|
|
- "📋 Agent 类型: <class 'langgraph.graph.state.CompiledStateGraph'>\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "# 创建 ReAct Agent\n",
|
|
|
- "# 尝试多种兼容的方式来设置系统消息\n",
|
|
|
- "\n",
|
|
|
- "agent = None\n",
|
|
|
- "success_method = None\n",
|
|
|
- "\n",
|
|
|
- "# 方法1:尝试使用 system_message 参数\n",
|
|
|
- "try:\n",
|
|
|
- " agent = create_react_agent(\n",
|
|
|
- " llm,\n",
|
|
|
- " tools=tools,\n",
|
|
|
- " system_message=SYSTEM_MESSAGE\n",
|
|
|
- " )\n",
|
|
|
- " success_method = \"system_message 参数\"\n",
|
|
|
- " print(\"✅ ReAct Agent 创建成功(方法1:system_message 参数)\")\n",
|
|
|
- "except Exception as e:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"方法1失败: {e}\")\n",
|
|
|
- " \n",
|
|
|
- " # 方法2:尝试使用 state_modifier 参数\n",
|
|
|
- " try:\n",
|
|
|
- " agent = create_react_agent(\n",
|
|
|
- " llm,\n",
|
|
|
- " tools=tools,\n",
|
|
|
- " state_modifier=SYSTEM_MESSAGE\n",
|
|
|
- " )\n",
|
|
|
- " success_method = \"state_modifier 参数\"\n",
|
|
|
- " print(\"✅ ReAct Agent 创建成功(方法2:state_modifier 参数)\")\n",
|
|
|
- " except Exception as e2:\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"方法2失败: {e2}\")\n",
|
|
|
- " \n",
|
|
|
- " # 方法3:基础创建(将在调用时处理系统消息)\n",
|
|
|
- " try:\n",
|
|
|
- " agent = create_react_agent(\n",
|
|
|
- " llm,\n",
|
|
|
- " tools=tools\n",
|
|
|
- " )\n",
|
|
|
- " success_method = \"基础创建(系统消息将在调用时处理)\"\n",
|
|
|
- " print(\"✅ ReAct Agent 创建成功(方法3:基础创建)\")\n",
|
|
|
- " print(\"⚠️ 注意:系统消息将在每次调用时手动添加\")\n",
|
|
|
- " except Exception as e3:\n",
|
|
|
- " print(f\"❌ 所有方法都失败了:\")\n",
|
|
|
- " print(f\" 方法1: {e}\")\n",
|
|
|
- " print(f\" 方法2: {e2}\")\n",
|
|
|
- " print(f\" 方法3: {e3}\")\n",
|
|
|
- " raise Exception(\"无法创建 ReAct Agent\")\n",
|
|
|
- "\n",
|
|
|
- "print(f\"🎯 成功方法: {success_method}\")\n",
|
|
|
- "print(f\"📋 Agent 类型: {type(agent)}\")\n"
|
|
|
- ]
|
|
|
- },
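- {
- "cell_type": "raw",
- "metadata": {
- "vscode": {
- "languageId": "raw"
- }
- },
- "source": [
- "### 补充:create_react_agent 的 prompt 参数(示意)\n",
- "\n",
- "上面的方法1/方法2在当前环境中都被拒绝,因此系统消息改为在每次调用时手动添加。作为参考,较新版本的 langgraph 为 create_react_agent 提供了 prompt 参数,可直接传入系统提示;下面只是一个示意写法,是否可用取决于实际安装的 langgraph 版本:\n",
- "\n",
- "```python\n",
- "# 示意:在支持 prompt 参数的 langgraph 版本上,可以这样传入系统提示\n",
- "agent = create_react_agent(llm, tools=tools, prompt=SYSTEM_MESSAGE)\n",
- "```\n"
- ]
- },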
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": 18,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [
|
|
|
- {
|
|
|
- "name": "stdout",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "🔍 Agent 状态验证:\n",
|
|
|
- " - Agent 实例: True\n",
|
|
|
- " - Agent 类型: <class 'langgraph.graph.state.CompiledStateGraph'>\n",
|
|
|
- " - 创建方法: 基础创建(系统消息将在调用时处理)\n",
|
|
|
- " - 可用工具: 4 个\n",
|
|
|
- " - 工具列表: ['generate_sql', 'valid_sql', 'run_sql', 'generate_summary']\n",
|
|
|
- " - 基础调用测试: 尝试中...\n",
|
|
|
- " - 基础调用测试: ❌ 失败 (Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': 'parameter.enable_thinking must be set to false for non-streaming calls', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-9f3d39f8-df01-9096-a0ce-c11c829b0b24', 'request_id': '9f3d39f8-df01-9096-a0ce-c11c829b0b24'})\n",
|
|
|
- " - 详细错误:\n",
|
|
|
- "\n",
|
|
|
- "==================================================\n",
|
|
|
- "⚠️ Agent 验证失败,请检查配置\n",
|
|
|
- "==================================================\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "name": "stderr",
|
|
|
- "output_type": "stream",
|
|
|
- "text": [
|
|
|
- "Traceback (most recent call last):\n",
|
|
|
- " File \"C:\\Users\\PaulWang\\AppData\\Local\\Temp\\ipykernel_40896\\2734479170.py\", line 24, in verify_agent\n",
|
|
|
- " result = agent.invoke({\"messages\": simple_messages}, test_config)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\__init__.py\", line 2719, in invoke\n",
|
|
|
- " for chunk in self.stream(\n",
|
|
|
- " ^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\__init__.py\", line 2436, in stream\n",
|
|
|
- " for _ in runner.tick(\n",
|
|
|
- " ^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\runner.py\", line 161, in tick\n",
|
|
|
- " run_with_retry(\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\pregel\\retry.py\", line 40, in run_with_retry\n",
|
|
|
- " return task.proc.invoke(task.input, config)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 623, in invoke\n",
|
|
|
- " input = context.run(step.invoke, input, config, **kwargs)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\utils\\runnable.py\", line 370, in invoke\n",
|
|
|
- " ret = context.run(self.func, *args, **kwargs)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langgraph\\prebuilt\\chat_agent_executor.py\", line 505, in call_model\n",
|
|
|
- " response = cast(AIMessage, model_runnable.invoke(state, config))\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\base.py\", line 3047, in invoke\n",
|
|
|
- " input_ = context.run(step.invoke, input_, config)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\runnables\\base.py\", line 5431, in invoke\n",
|
|
|
- " return self.bound.invoke(\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 372, in invoke\n",
|
|
|
- " self.generate_prompt(\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 957, in generate_prompt\n",
|
|
|
- " return self.generate(prompt_messages, stop=stop, callbacks=callbacks, **kwargs)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 776, in generate\n",
|
|
|
- " self._generate_with_cache(\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_core\\language_models\\chat_models.py\", line 1022, in _generate_with_cache\n",
|
|
|
- " result = self._generate(\n",
|
|
|
- " ^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\langchain_openai\\chat_models\\base.py\", line 790, in _generate\n",
|
|
|
- " response = self.client.create(**payload)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_utils\\_utils.py\", line 287, in wrapper\n",
|
|
|
- " return func(*args, **kwargs)\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\resources\\chat\\completions\\completions.py\", line 925, in create\n",
|
|
|
- " return self._post(\n",
|
|
|
- " ^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1239, in post\n",
|
|
|
- " return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))\n",
|
|
|
- " ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n",
|
|
|
- " File \"c:\\Projects\\cursor_projects\\Vanna-Chainlit-Chromadb\\.venv\\Lib\\site-packages\\openai\\_base_client.py\", line 1034, in request\n",
|
|
|
- " raise self._make_status_error_from_response(err.response) from None\n",
|
|
|
- "openai.BadRequestError: Error code: 400 - {'error': {'code': 'invalid_parameter_error', 'param': None, 'message': 'parameter.enable_thinking must be set to false for non-streaming calls', 'type': 'invalid_request_error'}, 'id': 'chatcmpl-9f3d39f8-df01-9096-a0ce-c11c829b0b24', 'request_id': '9f3d39f8-df01-9096-a0ce-c11c829b0b24'}\n",
|
|
|
- "During task with name 'agent' and id 'e3744fa2-a8df-45fa-2e57-f0e5dec4feb4'\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "source": [
|
|
|
- "## 5.1 Agent 状态验证\n",
|
|
|
- "\n",
|
|
|
- "# 验证Agent是否正常创建\n",
|
|
|
- "def verify_agent():\n",
|
|
|
- " \"\"\"验证Agent状态\"\"\"\n",
|
|
|
- " print(\"🔍 Agent 状态验证:\")\n",
|
|
|
- " print(f\" - Agent 实例: {agent is not None}\")\n",
|
|
|
- " print(f\" - Agent 类型: {type(agent)}\")\n",
|
|
|
- " print(f\" - 创建方法: {success_method}\")\n",
|
|
|
- " print(f\" - 可用工具: {len(tools)} 个\")\n",
|
|
|
- " print(f\" - 工具列表: {[tool.name for tool in tools]}\")\n",
|
|
|
- " \n",
|
|
|
- " # 测试基础功能\n",
|
|
|
- " try:\n",
|
|
|
- " # 创建一个最简单的消息(包含系统消息,因为我们用的是基础创建)\n",
|
|
|
- " simple_messages = [\n",
|
|
|
- " SystemMessage(content=\"你是一个有用的助手。\"),\n",
|
|
|
- " HumanMessage(content=\"你好,请简单回复。\")\n",
|
|
|
- " ]\n",
|
|
|
- " test_config = {\"recursion_limit\": 3}\n",
|
|
|
- " \n",
|
|
|
- " # 尝试调用\n",
|
|
|
- " print(\" - 基础调用测试: 尝试中...\")\n",
|
|
|
- " result = agent.invoke({\"messages\": simple_messages}, test_config)\n",
|
|
|
- " \n",
|
|
|
- " # 检查返回结果\n",
|
|
|
- " if result and \"messages\" in result:\n",
|
|
|
- " final_message = result[\"messages\"][-1]\n",
|
|
|
- " print(f\" - 基础调用测试: ✅ 成功\")\n",
|
|
|
- " print(f\" - 返回消息类型: {type(final_message).__name__}\")\n",
|
|
|
- " print(f\" - 消息内容预览: {final_message.content[:50]}...\")\n",
|
|
|
- " return True\n",
|
|
|
- " else:\n",
|
|
|
- " print(f\" - 基础调用测试: ❌ 返回格式异常\")\n",
|
|
|
- " return False\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " print(f\" - 基础调用测试: ❌ 失败 ({e})\")\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " import traceback\n",
|
|
|
- " print(\" - 详细错误:\")\n",
|
|
|
- " traceback.print_exc()\n",
|
|
|
- " return False\n",
|
|
|
- "\n",
|
|
|
- "# 执行验证\n",
|
|
|
- "verify_success = verify_agent()\n",
|
|
|
- "print(f\"\\n{'='*50}\")\n",
|
|
|
- "if verify_success:\n",
|
|
|
- " print(\"🎉 Agent 验证通过,可以开始测试!\")\n",
|
|
|
- " print(\"💡 提示:由于使用基础创建方式,每次调用都会包含完整的系统消息\")\n",
|
|
|
- "else:\n",
|
|
|
- " print(\"⚠️ Agent 验证失败,请检查配置\")\n",
|
|
|
- "print(f\"{'='*50}\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 6. 测试函数\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "def test_agent(question: str, max_iterations: int = None) -> Dict[str, Any]:\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " 测试Agent处理问题\n",
|
|
|
- " \n",
|
|
|
- " Args:\n",
|
|
|
- " question: 用户问题\n",
|
|
|
- " max_iterations: 最大迭代次数,默认使用MAX_TOOL_CALLS\n",
|
|
|
- " \n",
|
|
|
- " Returns:\n",
|
|
|
- " 处理结果\n",
|
|
|
- " \"\"\"\n",
|
|
|
- " if max_iterations is None:\n",
|
|
|
- " max_iterations = MAX_TOOL_CALLS\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n{'='*60}\")\n",
|
|
|
- " print(f\"🤔 问题: {question}\")\n",
|
|
|
- " print(f\"⚙️ 最大工具调用次数: {max_iterations}\")\n",
|
|
|
- " print(f\"⚙️ Agent 创建方法: {success_method}\")\n",
|
|
|
- " print(f\"{'='*60}\\n\")\n",
|
|
|
- " \n",
|
|
|
- " try:\n",
|
|
|
- " # 构建消息 - 根据Agent创建方式决定是否包含系统消息\n",
|
|
|
- " if success_method == \"基础创建(系统消息将在调用时处理)\":\n",
|
|
|
- " # 如果Agent创建时没有系统消息,需要手动添加\n",
|
|
|
- " messages = [\n",
|
|
|
- " SystemMessage(content=SYSTEM_MESSAGE),\n",
|
|
|
- " HumanMessage(content=question)\n",
|
|
|
- " ]\n",
|
|
|
- " else:\n",
|
|
|
- " # 如果Agent创建时已包含系统消息,只需要用户消息\n",
|
|
|
- " messages = [\n",
|
|
|
- " HumanMessage(content=question)\n",
|
|
|
- " ]\n",
|
|
|
- " \n",
|
|
|
- " # 设置配置,包括递归限制\n",
|
|
|
- " config = {\n",
|
|
|
- " \"recursion_limit\": max_iterations + 5, # 额外的缓冲\n",
|
|
|
- " \"configurable\": {\n",
|
|
|
- " \"thread_id\": f\"test_{datetime.now().strftime('%Y%m%d_%H%M%S')}\"\n",
|
|
|
- " }\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " print(f\"📝 发送消息数量: {len(messages)}\")\n",
|
|
|
- " print(f\"📝 消息类型: {[type(msg).__name__ for msg in messages]}\")\n",
|
|
|
- " \n",
|
|
|
- " # 调用Agent\n",
|
|
|
- " start_time = datetime.now()\n",
|
|
|
- " result = agent.invoke({\"messages\": messages}, config=config)\n",
|
|
|
- " end_time = datetime.now()\n",
|
|
|
- " \n",
|
|
|
- " # 提取最终响应\n",
|
|
|
- " final_message = result[\"messages\"][-1]\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n{'='*60}\")\n",
|
|
|
- " print(f\"✅ 最终答案:\")\n",
|
|
|
- " print(f\"{final_message.content}\")\n",
|
|
|
- " print(f\"\\n⏱️ 处理时间: {(end_time - start_time).total_seconds():.2f} 秒\")\n",
|
|
|
- " print(f\"📊 消息数量: {len(result['messages'])}\")\n",
|
|
|
- " print(f\"{'='*60}\\n\")\n",
|
|
|
- " \n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": True,\n",
|
|
|
- " \"question\": question,\n",
|
|
|
- " \"answer\": final_message.content,\n",
|
|
|
- " \"messages\": result[\"messages\"],\n",
|
|
|
- " \"duration\": (end_time - start_time).total_seconds()\n",
|
|
|
- " }\n",
|
|
|
- " \n",
|
|
|
- " except Exception as e:\n",
|
|
|
- " print(f\"\\n❌ 处理失败: {e}\")\n",
|
|
|
- " if VERBOSE:\n",
|
|
|
- " import traceback\n",
|
|
|
- " print(f\"🔍 详细错误信息:\")\n",
|
|
|
- " traceback.print_exc()\n",
|
|
|
- " return {\n",
|
|
|
- " \"success\": False,\n",
|
|
|
- " \"question\": question,\n",
|
|
|
- " \"error\": str(e)\n",
|
|
|
- " }\n",
|
|
|
- "\n",
|
|
|
- "print(\"✅ 测试函数已定义\")\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 7. 执行测试\n",
|
|
|
- "### 7.1 测试数据库查询"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "# 测试数据库查询问题\n",
|
|
|
- "test_questions_db = [\n",
|
|
|
- " \"查询所有服务区的名称\",\n",
|
|
|
- " \"统计今天的营业额\",\n",
|
|
|
- " \"哪个档口的收入最高?\",\n",
|
|
|
- " \"昨天的车流量是多少?\"\n",
|
|
|
- "]\n",
|
|
|
- "\n",
|
|
|
- "# 选择一个问题测试(可以修改索引)\n",
|
|
|
- "result = test_agent(test_questions_db[0], max_iterations=8)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 7.2 测试常识问题\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "# 测试常识性问题\n",
|
|
|
- "test_questions_common = [\n",
|
|
|
- " \"荔枝几月份上市?\",\n",
|
|
|
- " \"今天天气怎么样?\",\n",
|
|
|
- " \"Python是什么?\",\n",
|
|
|
- " \"如何做番茄炒蛋?\"\n",
|
|
|
- "]\n",
|
|
|
- "\n",
|
|
|
- "# 选择一个问题测试\n",
|
|
|
- "result = test_agent(test_questions_common[0], max_iterations=5)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 7.3 测试边界问题\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "# 测试边界问题(可能在数据库中,也可能需要常识)\n",
|
|
|
- "test_questions_boundary = [\n",
|
|
|
- " \"服务区有卖荔枝吗?\", # 可能需要查询商品表\n",
|
|
|
- " \"高速公路什么时候建成的?\", # 可能没有这个数据\n",
|
|
|
- " \"如何联系客服?\", # 系统相关但可能不在数据库\n",
|
|
|
- "]\n",
|
|
|
- "\n",
|
|
|
- "# 选择一个问题测试\n",
|
|
|
- "result = test_agent(test_questions_boundary[0], max_iterations=10)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "### 7.4 批量测试\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "# 批量测试多个问题\n",
|
|
|
- "def batch_test(questions: List[str], max_iterations: int = None):\n",
|
|
|
- " \"\"\"批量测试问题\"\"\"\n",
|
|
|
- " results = []\n",
|
|
|
- " \n",
|
|
|
- " for i, question in enumerate(questions, 1):\n",
|
|
|
- " print(f\"\\n🔄 测试 {i}/{len(questions)}: {question}\")\n",
|
|
|
- " result = test_agent(question, max_iterations)\n",
|
|
|
- " results.append(result)\n",
|
|
|
- " \n",
|
|
|
- " # 简短总结\n",
|
|
|
- " if result[\"success\"]:\n",
|
|
|
- " print(f\"✅ 成功,耗时 {result['duration']:.2f} 秒\")\n",
|
|
|
- " else:\n",
|
|
|
- " print(f\"❌ 失败: {result.get('error', 'Unknown error')}\")\n",
|
|
|
- " \n",
|
|
|
- " # 统计\n",
|
|
|
- " success_count = sum(1 for r in results if r[\"success\"])\n",
|
|
|
- " total_time = sum(r.get(\"duration\", 0) for r in results)\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n📊 批量测试完成:\")\n",
|
|
|
- " print(f\" - 成功率: {success_count}/{len(questions)} ({success_count/len(questions)*100:.1f}%)\")\n",
|
|
|
- " print(f\" - 总耗时: {total_time:.2f} 秒\")\n",
|
|
|
- " print(f\" - 平均耗时: {total_time/len(questions):.2f} 秒/问题\")\n",
|
|
|
- " \n",
|
|
|
- " return results\n",
|
|
|
- "\n",
|
|
|
- "# 执行批量测试\n",
|
|
|
- "all_test_questions = [\n",
|
|
|
- " \"查询所有服务区\",\n",
|
|
|
- " \"荔枝几月份上市?\",\n",
|
|
|
- " \"今天的营业额是多少?\",\n",
|
|
|
- " \"Python是什么编程语言?\"\n",
|
|
|
- "]\n",
|
|
|
- "\n",
|
|
|
- "# batch_results = batch_test(all_test_questions, max_iterations=8)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 8. 调试工具\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "def analyze_agent_execution(result: Dict[str, Any]):\n",
|
|
|
- " \"\"\"分析Agent执行过程\"\"\"\n",
|
|
|
- " if not result.get(\"success\"):\n",
|
|
|
- " print(\"❌ 执行失败,无法分析\")\n",
|
|
|
- " return\n",
|
|
|
- " \n",
|
|
|
- " messages = result.get(\"messages\", [])\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n📝 执行过程分析:\")\n",
|
|
|
- " print(f\"总消息数: {len(messages)}\")\n",
|
|
|
- " \n",
|
|
|
- " tool_calls = []\n",
|
|
|
- " for i, msg in enumerate(messages):\n",
|
|
|
- " if hasattr(msg, 'tool_calls') and msg.tool_calls:\n",
|
|
|
- " for tool_call in msg.tool_calls:\n",
|
|
|
- " tool_calls.append({\n",
|
|
|
- " \"index\": i,\n",
|
|
|
- " \"tool\": tool_call[\"name\"],\n",
|
|
|
- " \"args\": tool_call.get(\"args\", {})\n",
|
|
|
- " })\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n🔧 工具调用序列 (共 {len(tool_calls)} 次):\")\n",
|
|
|
- " for tc in tool_calls:\n",
|
|
|
- " print(f\" {tc['index']}. {tc['tool']} - 参数: {tc['args']}\")\n",
|
|
|
- " \n",
|
|
|
- " # 统计工具使用\n",
|
|
|
- " from collections import Counter\n",
|
|
|
- " tool_counter = Counter(tc['tool'] for tc in tool_calls)\n",
|
|
|
- " \n",
|
|
|
- " print(f\"\\n📊 工具使用统计:\")\n",
|
|
|
- " for tool, count in tool_counter.items():\n",
|
|
|
- " print(f\" - {tool}: {count} 次\")\n",
|
|
|
- "\n",
|
|
|
- "# 使用示例(需要先运行测试)\n",
|
|
|
- "# analyze_agent_execution(result)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 9. 自定义测试\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "code",
|
|
|
- "execution_count": null,
|
|
|
- "metadata": {},
|
|
|
- "outputs": [],
|
|
|
- "source": [
|
|
|
- "# 在这里输入您的自定义问题进行测试\n",
|
|
|
- "custom_question = \"查询今天营业额最高的前3个档口\"\n",
|
|
|
- "\n",
|
|
|
- "# 可以调整最大工具调用次数\n",
|
|
|
- "custom_max_iterations = 10\n",
|
|
|
- "\n",
|
|
|
- "# 执行测试\n",
|
|
|
- "custom_result = test_agent(custom_question, max_iterations=custom_max_iterations)\n",
|
|
|
- "\n",
|
|
|
- "# 分析执行过程\n",
|
|
|
- "if custom_result[\"success\"]:\n",
|
|
|
- " analyze_agent_execution(custom_result)\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 10. 总结\n",
|
|
|
- "\n",
|
|
|
- "### 实现的功能:\n",
|
|
|
- "1. ✅ 使用 `create_react_agent()` 创建智能Agent\n",
|
|
|
- "2. ✅ 实现四个工具:generate_sql, valid_sql, run_sql, generate_summary\n",
|
|
|
- "3. ✅ Agent能够自主判断是查询数据库还是用常识回答\n",
|
|
|
- "4. ✅ 支持配置最大工具调用次数,防止无限循环\n",
|
|
|
- "5. ✅ 对边界问题的处理:先尝试查询,失败则用常识\n",
|
|
|
- "\n",
|
|
|
- "### 使用说明:\n",
|
|
|
- "1. 修改 `DATABASE_SCOPE` 变量来更新数据库业务范围描述\n",
|
|
|
- "2. 调整 `MAX_TOOL_CALLS` 来控制最大工具调用次数\n",
|
|
|
- "3. 使用 `test_agent()` 函数测试单个问题\n",
|
|
|
- "4. 使用 `batch_test()` 批量测试多个问题\n",
|
|
|
- "5. 使用 `analyze_agent_execution()` 分析执行过程\n",
|
|
|
- "\n",
|
|
|
- "### 注意事项:\n",
|
|
|
- "- 所有代码都在这个notebook中,不影响项目其他部分\n",
|
|
|
- "- valid_sql 工具是新创建的,从现有代码中提取了验证逻辑\n",
|
|
|
- "- Agent会根据工具返回的success和error信息智能决策下一步\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 11. 依赖包说明\n",
|
|
|
- "\n",
|
|
|
- "### 运行此 notebook 需要的包:\n",
|
|
|
- "\n",
|
|
|
- "如果运行时遇到缺包错误,请在您的 `.venv` 环境中安装以下包:\n",
|
|
|
- "\n",
|
|
|
- "```bash\n",
|
|
|
- "# LangChain 和 LangGraph 相关\n",
|
|
|
- "pip install langchain==0.3.7\n",
|
|
|
- "pip install langgraph==0.2.53\n",
|
|
|
- "pip install langchain-openai==0.2.9 # 如果使用OpenAI兼容API\n",
|
|
|
- "\n",
|
|
|
- "# 其他可能需要的依赖\n",
|
|
|
- "pip install pandas # 如果还没安装\n",
|
|
|
- "pip install asyncio # 通常已内置\n",
|
|
|
- "```\n",
|
|
|
- "\n",
|
|
|
- "### 版本兼容性说明:\n",
|
|
|
- "- 本 notebook 基于 LangChain/LangGraph v0.3.x 开发\n",
|
|
|
- "- `create_react_agent` 函数在 `langgraph.prebuilt` 模块中\n",
|
|
|
- "- 如果版本不匹配,可能需要调整导入路径或API用法\n",
|
|
|
- "\n",
|
|
|
- "### 常见问题:\n",
|
|
|
- "1. **ImportError: cannot import name 'create_react_agent'**\n",
|
|
|
- " - 确保 langgraph 版本 >= 0.2.0\n",
|
|
|
- " - 检查导入路径是否正确\n",
|
|
|
- "\n",
|
|
|
- "2. **找不到 Vanna 实例**\n",
|
|
|
- " - 确保项目根目录的 common/vanna_instance.py 可以正常导入\n",
|
|
|
- " - 检查数据库连接配置\n",
|
|
|
- "\n",
|
|
|
- "3. **LLM 调用失败**\n",
|
|
|
- " - 检查 app_config.py 中的 LLM 配置\n",
|
|
|
- " - 确保 API key 和 endpoint 正确\n"
|
|
|
- ]
|
|
|
- },
|
|
|
- {
|
|
|
- "cell_type": "raw",
|
|
|
- "metadata": {
|
|
|
- "vscode": {
|
|
|
- "languageId": "raw"
|
|
|
- }
|
|
|
- },
|
|
|
- "source": [
|
|
|
- "## 12. 开始使用\n",
|
|
|
- "\n",
|
|
|
- "### 快速开始:\n",
|
|
|
- "1. 确保已激活 `.venv` 环境\n",
|
|
|
- "2. 运行 Cell 1-5 进行初始化设置\n",
|
|
|
- "3. 运行 Cell 6-16 创建工具和Agent\n",
|
|
|
- "4. 运行 Cell 19 定义测试函数\n",
|
|
|
- "5. 然后可以测试各种问题:\n",
|
|
|
- "\n",
|
|
|
- "```python\n",
|
|
|
- "# 测试示例\n",
|
|
|
- "test_agent(\"查询今天的营业额\")\n",
|
|
|
- "test_agent(\"荔枝几月份上市?\")\n",
|
|
|
- "test_agent(\"哪个服务区车流量最大?\")\n",
|
|
|
- "```\n",
|
|
|
- "\n",
|
|
|
- "祝您测试愉快!🚀\n"
|
|
|
- ]
|
|
|
- }
|
|
|
- ],
|
|
|
- "metadata": {
|
|
|
- "kernelspec": {
|
|
|
- "display_name": ".venv",
|
|
|
- "language": "python",
|
|
|
- "name": "python3"
|
|
|
- },
|
|
|
- "language_info": {
|
|
|
- "codemirror_mode": {
|
|
|
- "name": "ipython",
|
|
|
- "version": 3
|
|
|
- },
|
|
|
- "file_extension": ".py",
|
|
|
- "mimetype": "text/x-python",
|
|
|
- "name": "python",
|
|
|
- "nbconvert_exporter": "python",
|
|
|
- "pygments_lexer": "ipython3",
|
|
|
- "version": "3.12.6"
|
|
|
- }
|
|
|
- },
|
|
|
- "nbformat": 4,
|
|
|
- "nbformat_minor": 2
|
|
|
-}
|