# deepseek_chat.py
  1. import os
  2. from openai import OpenAI
  3. from .base_llm_chat import BaseLLMChat
  4. class DeepSeekChat(BaseLLMChat):
  5. """DeepSeek AI聊天实现"""
  6. def __init__(self, config=None):
  7. print("...DeepSeekChat init...")
  8. super().__init__(config=config)
  9. if config is None:
  10. self.client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
  11. return
  12. if "api_key" in config:
  13. if "base_url" not in config:
  14. self.client = OpenAI(api_key=config["api_key"], base_url="https://api.deepseek.com")
  15. else:
  16. self.client = OpenAI(api_key=config["api_key"], base_url=config["base_url"])
  17. def submit_prompt(self, prompt, **kwargs) -> str:
  18. if prompt is None:
  19. raise Exception("Prompt is None")
  20. if len(prompt) == 0:
  21. raise Exception("Prompt is empty")
  22. # Count the number of tokens in the message log
  23. num_tokens = 0
  24. for message in prompt:
  25. num_tokens += len(message["content"]) / 4
  26. model = None
  27. if kwargs.get("model", None) is not None:
  28. model = kwargs.get("model", None)
  29. elif kwargs.get("engine", None) is not None:
  30. model = kwargs.get("engine", None)
  31. elif self.config is not None and "engine" in self.config:
  32. model = self.config["engine"]
  33. elif self.config is not None and "model" in self.config:
  34. model = self.config["model"]
  35. else:
  36. if num_tokens > 3500:
  37. model = "deepseek-chat"
  38. else:
  39. model = "deepseek-chat"
  40. print(f"\nUsing model {model} for {num_tokens} tokens (approx)")
  41. # DeepSeek不支持thinking功能,忽略enable_thinking参数
  42. response = self.client.chat.completions.create(
  43. model=model,
  44. messages=prompt,
  45. stop=None,
  46. temperature=self.temperature,
  47. )
  48. return response.choices[0].message.content