#!/usr/bin/env python3
"""
Test script for the configuration utility functions.
Verifies that the functions in common/utils.py work correctly.
"""


def test_config_utils():
    """Test the configuration utility functions."""
    try:
        from common.utils import (
            get_current_embedding_config,
            get_current_llm_config,
            get_current_vector_db_config,
            get_current_model_info,
            is_using_ollama_llm,
            is_using_ollama_embedding,
            is_using_api_llm,
            is_using_api_embedding,
            print_current_config
        )

        print("=== Testing configuration utility functions ===")

        # Model-type check helpers
        print(f"Using Ollama LLM: {is_using_ollama_llm()}")
        print(f"Using Ollama Embedding: {is_using_ollama_embedding()}")
        print(f"Using API LLM: {is_using_api_llm()}")
        print(f"Using API Embedding: {is_using_api_embedding()}")
        print()

        # Configuration getters
        print("=== LLM configuration ===")
        llm_config = get_current_llm_config()
        for key, value in llm_config.items():
            if key == "api_key" and value:
                print(f"{key}: {'*' * 8}...{value[-4:]}")  # mask the API key
            else:
                print(f"{key}: {value}")
        print()

        print("=== Embedding configuration ===")
        embedding_config = get_current_embedding_config()
        for key, value in embedding_config.items():
            if key == "api_key" and value:
                print(f"{key}: {'*' * 8}...{value[-4:]}")  # mask the API key
            else:
                print(f"{key}: {value}")
        print()

        print("=== Vector database configuration ===")
        vector_db_config = get_current_vector_db_config()
        for key, value in vector_db_config.items():
            if key == "password" and value:
                print(f"{key}: {'*' * 8}")  # mask the password
            else:
                print(f"{key}: {value}")
        print()

        # Model info summary
        print("=== Model info summary ===")
        model_info = get_current_model_info()
        for key, value in model_info.items():
            print(f"{key}: {value}")
        print()

        # Print the full current configuration
        print_current_config()

        print("✅ All configuration utility function tests passed!")
    except Exception as e:
        print(f"❌ Test failed: {e}")
        import traceback
        traceback.print_exc()


def test_different_configurations():
    """Test different configuration combinations."""
    import app_config
    from common.utils import print_current_config

    print("\n=== Testing different configuration combinations ===")

    # Save the original configuration
    original_llm_type = app_config.LLM_MODEL_TYPE
    original_embedding_type = app_config.EMBEDDING_MODEL_TYPE
    original_llm_name = app_config.LLM_MODEL_NAME

    try:
        # Configuration 1: API LLM + API Embedding
        print("\n--- Configuration 1: API LLM + API Embedding ---")
        app_config.LLM_MODEL_TYPE = "api"
        app_config.EMBEDDING_MODEL_TYPE = "api"
        app_config.LLM_MODEL_NAME = "qwen"
        print_current_config()

        # Configuration 2: API LLM + Ollama Embedding
        print("\n--- Configuration 2: API LLM + Ollama Embedding ---")
        app_config.LLM_MODEL_TYPE = "api"
        app_config.EMBEDDING_MODEL_TYPE = "ollama"
        app_config.LLM_MODEL_NAME = "deepseek"
        print_current_config()

        # Configuration 3: Ollama LLM + API Embedding
        print("\n--- Configuration 3: Ollama LLM + API Embedding ---")
        app_config.LLM_MODEL_TYPE = "ollama"
        app_config.EMBEDDING_MODEL_TYPE = "api"
        print_current_config()

        # Configuration 4: Ollama LLM + Ollama Embedding
        print("\n--- Configuration 4: Ollama LLM + Ollama Embedding ---")
        app_config.LLM_MODEL_TYPE = "ollama"
        app_config.EMBEDDING_MODEL_TYPE = "ollama"
        print_current_config()
    except Exception as e:
        print(f"❌ Configuration test failed: {e}")
    finally:
        # Restore the original configuration
        app_config.LLM_MODEL_TYPE = original_llm_type
        app_config.EMBEDDING_MODEL_TYPE = original_embedding_type
        app_config.LLM_MODEL_NAME = original_llm_name
        print("\n--- Restored original configuration ---")
        print_current_config()


if __name__ == "__main__":
    test_config_utils()
    test_different_configurations()