# llm.py
  1. import os
  2. # from langchain_community.chat_models import ChatOpenAI
  3. from langchain_openai import ChatOpenAI
  4. from langchain_community.llms import Tongyi
  5. from langchain_community.chat_models.tongyi import ChatTongyi
  6. class LLM():
  7. def __init__(self, model_name='qwen'):
  8. if model_name == 'deepseek':
  9. self.llm = self.deepseek_llm()
  10. elif model_name == 'qwen':
  11. self.llm = self.qwen_llm()
  12. else:
  13. raise ValueError("Unsupported model name")
  14. def deepseek_llm(self):
  15. llm = ChatOpenAI(
  16. model='deepseek-coder',
  17. openai_api_key='sk-7a15d12a3e254dd0a3408f1544c72da5',
  18. openai_api_base='https://api.deepseek.com',
  19. max_tokens=1024
  20. )
  21. return llm
  22. def qwen_llm(self):
  23. os.environ["DASHSCOPE_API_KEY"] = "sk-86d4622141d74e9a8d7c38ee873c4d91"
  24. llm = ChatTongyi(model='qwen-turbo')
  25. return llm
  26. def get_llm(self):
  27. return self.llm
  28. class QwenTurboTongyi(Tongyi):
  29. mode_name = 'qwen-plus'