modify onlineChatModule prompter setting (#3)
Co-authored-by: wangjian <[email protected]>
wangjian052163 and wangjian authored Jun 13, 2024
1 parent dec381c commit 8b160d2
Showing 7 changed files with 47 additions and 28 deletions.
5 changes: 3 additions & 2 deletions lazyllm/module/onlineChatModule/doubaoModule.py
@@ -8,18 +8,19 @@ class DoubaoModule(OnlineChatModuleBase):
     def __init__(self,
                  model: str,
                  base_url: str = "https://ark.cn-beijing.volces.com/api/v3",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False):
         super().__init__(model_type=self.__class__.__name__,
                          api_key=lazyllm.config['doubao_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace)
 
+    def _get_system_prompt(self):
+        return "你是人工智能助手豆包。你的任务是针对用户的问题和要求提供适当的答复和支持。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'chat/completions')
 
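Note: every provider file in this commit follows the pattern shown above for doubaoModule.py: the `system_prompt` constructor argument is removed, and a provider-specific `_get_system_prompt()` override supplies a fixed identity prompt that the base class injects into the prompter. A minimal, runnable sketch of that override pattern (`Base` and `DoubaoLike` are illustrative names, not classes from the repo):

```python
# Illustrative only: the real pair is OnlineChatModuleBase and the
# provider modules in this diff.
class Base:
    def __init__(self):
        # The base class asks the subclass for its identity prompt
        # when it sets up the prompter (see onlineChatModuleBase.py below).
        self._system = self._get_system_prompt()

    def _get_system_prompt(self):
        raise NotImplementedError("_get_system_prompt is not implemented.")

class DoubaoLike(Base):
    def _get_system_prompt(self):
        return "You are the AI assistant Doubao."

print(DoubaoLike()._system)  # -> You are the AI assistant Doubao.
```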
5 changes: 3 additions & 2 deletions lazyllm/module/onlineChatModule/glmModule.py
@@ -12,7 +12,6 @@ class GLMModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://open.bigmodel.cn/api/paas/v4",
                  model: str = "glm-4",
-                 system_prompt: str = "你是一个乐于解答各种问题的助手,你的任务是为用户提供专业、准确、有见地的建议。",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -22,12 +21,14 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=GLMModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)
 
+    def _get_system_prompt(self):
+        return "你是人工智能助手智谱清言(ChatGLM),是基于智谱 AI 公司于2023训练的语言模型开发的。你的任务是针对用户的问题和要求提供适当的答复和支持。"
+
     def _get_models_list(self):
         return ["glm-4", "glm-4v", "glm-3-turbo", "chatglm-turbo", "cogview-3", "embedding-2", "text-embedding"]

9 changes: 5 additions & 4 deletions lazyllm/module/onlineChatModule/moonshotaiModule.py
@@ -7,9 +7,6 @@ class MoonshotAIModule(OnlineChatModuleBase):
     def __init__(self,
                  base_url="https://api.moonshot.cn",
                  model="moonshot-v1-8k",
-                 system_prompt="你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\
-                 你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
-                 黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -18,11 +15,15 @@ def __init__(self,
                          api_key=lazyllm.config['moonshotai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=[],
                          return_trace=return_trace,
                          **kwargs)
 
+    def _get_system_prompt(self):
+        return "你是 Kimi,由 Moonshot AI 提供的人工智能助手,你更擅长中文和英文的对话。\
+                你会为用户提供安全,有帮助,准确的回答。同时,你会拒绝一切涉及恐怖主义,种族歧视,\
+                黄色暴力等问题的回答。Moonshot AI 为专有名词,不可翻译成其他语言。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'v1/chat/completions')
22 changes: 14 additions & 8 deletions lazyllm/module/onlineChatModule/onlineChatModuleBase.py
@@ -14,11 +14,9 @@ def __init__(self,
                  api_key: str,
                  base_url: str,
                  model_name: str,
-                 system_prompt: str,
                  stream: bool,
                  trainable_models: List[str],
                  return_trace: bool = False,
-                 prompter: PrompterBase = None,
                  **kwargs):
         super().__init__(return_trace=return_trace)
         self._model_type = model_type
@@ -27,19 +25,27 @@ def __init__(self,
         self._api_key = api_key
         self._base_url = base_url
         self._model_name = model_name
-        self.system_prompt(prompt=system_prompt)
         self._stream = stream
         self.trainable_mobels = trainable_models
         self._set_headers()
         self._set_chat_url()
-        self._prompt = prompter if prompter else ChatPrompter()
+        self.prompt()
         self._is_trained = False
 
-    def system_prompt(self, prompt: str = ""):
-        if len(prompt) > 0:
-            self._system_prompt = {"role": "system", "content": prompt}
+    def prompt(self, prompt=None):
+        if prompt is None:
+            self._prompt = ChatPrompter()
+        elif isinstance(prompt, PrompterBase):
+            self._prompt = prompt
+        elif isinstance(prompt, str):
+            self._prompt = ChatPrompter(prompt)
         else:
-            self._system_prompt = {"role": "system", "content": "You are a helpful assistant."}
+            raise TypeError(f"{prompt} type is not supported.")
+        self._prompt._set_model_configs(system=self._get_system_prompt())
+        return self
+
+    def _get_system_prompt(self):
+        raise NotImplementedError("_get_system_prompt is not implemented.")
 
     def _set_headers(self):
         self._headers = {
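With this refactor, callers configure prompting through the new `prompt()` setter rather than the old `system_prompt`/`prompter` constructor arguments. A hedged usage sketch (assumes lazyllm is installed, `lazyllm.config['openai_api_key']` is set, and the import path matches the file layout shown in this commit; the instruction string is made up):

```python
from lazyllm.module.onlineChatModule.openaiModule import OpenAIModule

# The system identity now comes from OpenAIModule._get_system_prompt().
# prompt() accepts None (default ChatPrompter), a str (wrapped in a
# ChatPrompter), or a PrompterBase instance, and returns self.
m = OpenAIModule(model="gpt-3.5-turbo", stream=False)
m.prompt("You answer questions about Python, concisely.")

# Any other type now fails fast:
# m.prompt(42)  # TypeError: 42 type is not supported.
```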
5 changes: 3 additions & 2 deletions lazyllm/module/onlineChatModule/openaiModule.py
@@ -14,7 +14,6 @@ class OpenAIModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://api.openai.com/v1",
                  model: str = "gpt-3.5-turbo",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -23,13 +22,15 @@ def __init__(self,
                          api_key=lazyllm.config['openai_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=OpenAIModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)
 
+    def _get_system_prompt(self):
+        return "You are ChatGPT, a large language model trained by OpenAI. You are a helpful assistant."
+
     def _convert_file_format(self, filepath: str) -> str:
         with open(filepath, 'r', encoding='utf-8') as fr:
             dataset = [json.loads(line) for line in fr]
5 changes: 3 additions & 2 deletions lazyllm/module/onlineChatModule/qwenModule.py
@@ -17,7 +17,6 @@ class QwenModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url: str = "https://dashscope.aliyuncs.com",
                  model: str = "qwen-plus",
-                 system_prompt: str = "You are a helpful assistant.",
                  stream: bool = True,
                  return_trace: bool = False,
                  **kwargs):
@@ -26,14 +25,16 @@ def __init__(self,
                          api_key=lazyllm.config['qwen_api_key'],
                          base_url=base_url,
                          model_name=model,
-                         system_prompt=system_prompt,
                          stream=stream,
                          trainable_models=QwenModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None
 
+    def _get_system_prompt(self):
+        return "你是来自阿里云的大规模语言模型,你叫通义千问,你是一个有用的助手。"
+
     def _set_chat_url(self):
         self._url = os.path.join(self._base_url, 'compatible-mode/v1/chat/completions')

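A side note on `_set_chat_url` above: `os.path.join` happens to produce a valid URL on POSIX systems, but it is a filesystem join, not a URL join. A small standard-library sketch of the difference:

```python
import os
import posixpath

base = "https://dashscope.aliyuncs.com"
path = "compatible-mode/v1/chat/completions"

print(os.path.join(base, path))    # joins with '/' on POSIX, '\' on Windows
print(posixpath.join(base, path))  # always joins with '/', on any platform
# Both print https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions
# on POSIX; only posixpath.join is safe everywhere.
```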
24 changes: 16 additions & 8 deletions lazyllm/module/onlineChatModule/sensenovaModule.py
@@ -13,7 +13,6 @@ class SenseNovaModule(OnlineChatModuleBase, FileHandlerBase):
     def __init__(self,
                  base_url="https://api.sensenova.cn/v1/llm",
                  model="SenseChat-5",
-                 system_prompt="You are an AI assistant whose name is InternLM (书生·浦语).",
                  stream=True,
                  return_trace=False,
                  **kwargs):
@@ -24,13 +23,15 @@ def __init__(self,
                          base_url=base_url,
                          model_name=model,
                          stream=stream,
-                         system_prompt=system_prompt,
                          trainable_models=SenseNovaModule.TRAINABLE_MODEL_LIST,
                          return_trace=return_trace,
                          **kwargs)
         FileHandlerBase.__init__(self)
         self._deploy_paramters = None
 
+    def _get_system_prompt(self):
+        return "You are an AI assistant, developed by SenseTime and released in 2023."
+
     @staticmethod
     def encode_jwt_token(ak: str, sk: str) -> str:
         headers = {
@@ -63,12 +64,19 @@ def _parse_response_stream(self, response: str) -> str:
         return chunk
 
     def _parse_response_non_stream(self, response: str) -> str:
-        cur_msg = json.loads(response)['data']["choices"][0]
-        content = cur_msg.get("message", "")
-        msg = {"role": cur_msg["role"], "content": content}
-        cur_msg.pop("role")
-        cur_msg['message'] = msg
-        return cur_msg
+        try:
+            resp = json.loads(response)['data']
+            content = resp["choices"][0].get("message", "")
+            msg = {"role": resp['choices'][0].pop("role"), "content": content}
+            resp['choices'][0]['message'] = msg
+            if 'tool_calls' in resp['choices'][0]:
+                tool_calls = resp['choices'][0].pop("tool_calls")
+                resp['choices'][0]['message']['tool_calls'] = tool_calls
+            resp['model'] = self._model_name
+            return resp["choices"][0]
+        except Exception as e:
+            lazyllm.LOG.error(e)
+            return ""
 
     def _convert_file_format(self, filepath: str) -> None:
         with open(filepath, 'r', encoding='utf-8') as fr:
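The reworked `_parse_response_non_stream` above reshapes SenseNova's `data.choices[0]` payload into an OpenAI-style choice with a nested `message` dict, carries `tool_calls` across, and wraps the whole parse in try/except. A self-contained walk-through on a made-up payload (the field layout is inferred from the code; a real API response may carry more fields):

```python
import json

# Hypothetical SenseNova-style non-stream response body.
response = json.dumps({
    "data": {
        "choices": [{
            "role": "assistant",
            "message": "Hello!",
            "tool_calls": [{"id": "call_0", "type": "function"}],
        }]
    }
})

choice = json.loads(response)['data']["choices"][0]
# Fold the flat role/message/tool_calls fields into a nested message dict.
msg = {"role": choice.pop("role"), "content": choice.get("message", "")}
choice['message'] = msg
if 'tool_calls' in choice:
    choice['message']['tool_calls'] = choice.pop("tool_calls")

print(choice)
# {'message': {'role': 'assistant', 'content': 'Hello!',
#              'tool_calls': [{'id': 'call_0', 'type': 'function'}]}}
```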
