diff --git a/lazyllm/module/onlineChatModule/doubaoModule.py b/lazyllm/module/onlineChatModule/doubaoModule.py
index 35b24603..b3d9f68a 100644
--- a/lazyllm/module/onlineChatModule/doubaoModule.py
+++ b/lazyllm/module/onlineChatModule/doubaoModule.py
@@ -8,18 +8,19 @@ class DoubaoModule(OnlineChatModuleBase):
     def __init__(self,
                  model: str,
                  base_url: str = "https://ark.cn-beijing.volces.com/api/v3",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False):
         super().__init__(model_type=self.__class__.__name__,
                          api_key=lazyllm.config['doubao_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace)

+    def _get_system_prompt(self):
+        return "你是人工智能助手豆包。你的任务是针对用户的问题和要求提供适当的答复和支持。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'chat/completions')

diff --git a/lazyllm/module/onlineChatModule/glmModule.py b/lazyllm/module/onlineChatModule/glmModule.py
index 42514fda..7ef25d5e 100644
--- a/lazyllm/module/onlineChatModule/glmModule.py
+++ b/lazyllm/module/onlineChatModule/glmModule.py
@@ -12,7 +12,6 @@ class GLMModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://open.bigmodel.cn/api/paas/v4",
                  model: str = "glm-4",
-                 system_prompt: str = "你是一个乐于解答各种问题的助手,你的任务是为用户提供专业、准确、有见地的建议。",
                  stream: str = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -22,12 +21,14 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=GLMModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)

+    def _get_system_prompt(self):
+        return "你是人工智能助手智谱清言(ChatGLM),是基于智谱 AI 公司于2023年训练的语言模型开发的。你的任务是针对用户的问题和要求提供适当的答复和支持。"
+
     def _get_models_list(self):
         return ["glm-4", "glm-4v", "glm-3-turbo", "chatglm-turbo", "cogview-3", "embedding-2", "text-embedding"]

diff --git a/lazyllm/module/onlineChatModule/moonshotaiModule.py b/lazyllm/module/onlineChatModule/moonshotaiModule.py
index 75ebf4c0..fe70901e 100644
--- a/lazyllm/module/onlineChatModule/moonshotaiModule.py
+++ b/lazyllm/module/onlineChatModule/moonshotaiModule.py
@@ -7,9 +7,6 @@ class MoonshotAIModule(OnlineChatModuleBase):
     def __init__(self,
                  base_url="https://api.moonshot.cn",
                  model="moonshot-v1-8k",
-                 system_prompt="你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\
-                     你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
-                     黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -18,11 +15,15 @@ def __init__(self,
                          api_key=lazyllm.config['moonshotai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace,
                          **kwargs)

+    def _get_system_prompt(self):
+        return "你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\
+            你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
+            黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'v1/chat/completions')

diff --git a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
index 0ba3b60b..e6bfb614 100644
--- a/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
+++ b/lazyllm/module/onlineChatModule/onlineChatModuleBase.py
@@ -14,11 +14,9 @@ def __init__(self,
                  api_key: str,
                  base_url: str,
                  model_name: str,
-                 system_prompt: str,
                  stream: bool,
                  trainable_models: List[str],
                  return_trace: bool = False,
-                 prompter: PrompterBase = None,
                  **kwargs):
         super().__init__(return_trace=return_trace)
         self._model_type = model_type
@@ -27,19 +25,27 @@ def __init__(self,
         self._api_key = api_key
         self._base_url = base_url
         self._model_name = model_name
-        self.system_prompt(prompt=system_prompt)
         self._stream = stream
         self.trainable_mobels = trainable_models
         self._set_headers()
         self._set_chat_url()
-        self._prompt = prompter if prompter else ChatPrompter()
+        self.prompt()
         self._is_trained = False

-    def system_prompt(self, prompt: str = ""):
-        if len(prompt) > 0:
-            self._system_prompt = {"role": "system", "content": prompt}
+    def prompt(self, prompt=None):
+        if prompt is None:
+            self._prompt = ChatPrompter()
+        elif isinstance(prompt, PrompterBase):
+            self._prompt = prompt
+        elif isinstance(prompt, str):
+            self._prompt = ChatPrompter(prompt)
         else:
-            self._system_prompt = {"role": "system", "content": "You are a helpful assistant."}
+            raise TypeError(f"{prompt} type is not supported.")
+        self._prompt._set_model_configs(system=self._get_system_prompt())
+        return self
+
+    def _get_system_prompt(self):
+        raise NotImplementedError("_get_system_prompt is not implemented.")

     def _set_headers(self):
         self._headers = {
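Reviewer note: the hunk above is the core of this change. The per-instance system_prompt constructor argument is gone; the base class now asks each subclass for its fixed identity via _get_system_prompt() and routes user-facing prompt customization through the new prompt() method. A minimal usage sketch of the resulting call pattern (the import path is inferred from the file layout in this diff; the model name and prompt text are illustrative assumptions):

    from lazyllm.module.onlineChatModule.glmModule import GLMModule

    # Assumes the matching API key is already set in lazyllm.config.
    chat = GLMModule(model="glm-4", stream=False)

    # prompt() accepts None (default ChatPrompter), a str (wrapped in a
    # ChatPrompter), or a PrompterBase instance; anything else raises
    # TypeError. It returns self, so calls can be chained.
    chat.prompt("Answer the user's question concisely.")

Note that __init__ now calls self.prompt(), which ends by invoking self._get_system_prompt(), so a subclass that forgets to override _get_system_prompt() fails at construction time with NotImplementedError instead of silently inheriting a generic system prompt.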
diff --git a/lazyllm/module/onlineChatModule/openaiModule.py b/lazyllm/module/onlineChatModule/openaiModule.py
index f7c0bf5d..9ee80f7f 100644
--- a/lazyllm/module/onlineChatModule/openaiModule.py
+++ b/lazyllm/module/onlineChatModule/openaiModule.py
@@ -14,7 +14,6 @@ class OpenAIModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://api.openai.com/v1",
                  model: str = "gpt-3.5-turbo",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -23,13 +22,15 @@ def __init__(self,
                          api_key=lazyllm.config['openai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=OpenAIModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)

+    def _get_system_prompt(self):
+        return "You are ChatGPT, a large language model trained by OpenAI. You are a helpful assistant."
+
     def _convert_file_format(self, filepath: str) -> str:
         with open(filepath, 'r', encoding='utf-8') as fr:
             dataset = [json.loads(line) for line in fr]

diff --git a/lazyllm/module/onlineChatModule/qwenModule.py b/lazyllm/module/onlineChatModule/qwenModule.py
index def52410..9d07b82f 100644
--- a/lazyllm/module/onlineChatModule/qwenModule.py
+++ b/lazyllm/module/onlineChatModule/qwenModule.py
@@ -17,7 +17,6 @@ class QwenModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://dashscope.aliyuncs.com",
                  model: str = "qwen-plus",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -26,7 +25,6 @@ def __init__(self,
                          api_key=lazyllm.config['qwen_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=QwenModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
@@ -34,6 +32,9 @@ def __init__(self,
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None

+    def _get_system_prompt(self):
+        return "你是来自阿里云的大规模语言模型,你叫通义千问,你是一个有用的助手。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url,
                                  'compatible-mode/v1/chat/completions')

diff --git a/lazyllm/module/onlineChatModule/sensenovaModule.py b/lazyllm/module/onlineChatModule/sensenovaModule.py
index ab1f430e..ba4566f0 100644
--- a/lazyllm/module/onlineChatModule/sensenovaModule.py
+++ b/lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -13,7 +13,6 @@ class SenseNovaModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url="https://api.sensenova.cn/v1/llm",
                  model="SenseChat-5",
-                 system_prompt="You are an AI assistant whose name is InternLM (书生·浦语).",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -24,13 +23,15 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=SenseNovaModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None

+    def _get_system_prompt(self):
+        return "You are an AI assistant, developed by SenseTime and released in 2023."
+
     @staticmethod
     def encode_jwt_token(ak: str, sk: str) -> str:
         headers = {
@@ -63,12 +64,19 @@ def _parse_response_stream(self, response: str) -> str:
         return chunk

     def _parse_response_non_stream(self, response: str) -> str:
-        cur_msg = json.loads(response)['data']["choices"][0]
-        content = cur_msg.get("message", "")
-        msg = {"role": cur_msg["role"], "content": content}
-        cur_msg.pop("role")
-        cur_msg['message'] = msg
-        return cur_msg
+        try:
+            resp = json.loads(response)['data']
+            content = resp["choices"][0].get("message", "")
+            msg = {"role": resp['choices'][0].pop("role"), "content": content}
+            resp['choices'][0]['message'] = msg
+            if 'tool_calls' in resp['choices'][0]:
+                tool_calls = resp['choices'][0].pop("tool_calls")
+                resp['choices'][0]['message']['tool_calls'] = tool_calls
+            resp['model'] = self._model_name
+            return resp["choices"][0]
+        except Exception as e:
+            lazyllm.LOG.error(e)
+            return ""

     def _convert_file_format(self, filepath: str) -> None:
         with open(filepath, 'r', encoding='utf-8') as fr:
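Reviewer note: the rewritten _parse_response_non_stream normalizes SenseNova's payload into an OpenAI-style choice dict (a message carrying role/content plus optional tool_calls, with the model name attached) instead of mutating the choice in place. A rough standalone sketch of the happy path on a fabricated payload (field names follow the code above; the concrete values are invented for illustration):

    import json

    # Fabricated SenseNova-style body: 'message' holds the raw content string
    # and 'role' sits beside it rather than inside a message object.
    response = json.dumps({"data": {"choices": [{"role": "assistant", "message": "Hello!"}]}})

    resp = json.loads(response)['data']
    choice = resp["choices"][0]
    content = choice.get("message", "")
    choice['message'] = {"role": choice.pop("role"), "content": content}
    print(choice)  # {'message': {'role': 'assistant', 'content': 'Hello!'}}

One behavioral caveat worth flagging for callers: on a malformed payload the new code logs the exception and returns "" (a str), while the success path returns a dict, so downstream consumers of this method must tolerate both return types.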