From deae27751010af386035532777c6cbd0c9c860df Mon Sep 17 00:00:00 2001
From: zhuzhongshu123 <152354526+zhuzhongshu123@users.noreply.github.com>
Date: Fri, 17 Jan 2025 13:52:00 +0800
Subject: [PATCH] feat(bridge): spg server bridge supports config check and
 run solver (#287)

* x

* x (#280)

* bridge add solver

* x

* feat(bridge): spg server bridge (#283)

* x

* bridge add solver

* x

* add invoke

* llm client catch error
---
 kag/bridge/spg_server_bridge.py    | 32 +++++++++++++++++++++++++++++-
 kag/interface/common/llm_client.py |  6 +++---
 2 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/kag/bridge/spg_server_bridge.py b/kag/bridge/spg_server_bridge.py
index 7fde8f72..51b0ca25 100644
--- a/kag/bridge/spg_server_bridge.py
+++ b/kag/bridge/spg_server_bridge.py
@@ -16,7 +16,6 @@
 
 
 def init_kag_config(project_id: str, host_addr: str):
-
     os.environ[KAGConstants.ENV_KAG_PROJECT_ID] = project_id
     os.environ[KAGConstants.ENV_KAG_PROJECT_HOST_ADDR] = host_addr
     init_env()
@@ -47,3 +46,34 @@ def run_component(self, component_name, component_config, input_data):
         if hasattr(instance.input_types, "from_dict"):
             input_data = instance.input_types.from_dict(input_data)
         return [x.to_dict() for x in instance.invoke(input_data, write_ckpt=False)]
+
+    def run_llm_config_check(self, llm_config):
+        from kag.common.llm.llm_config_checker import LLMConfigChecker
+
+        return LLMConfigChecker().check(llm_config)
+
+    def run_vectorizer_config_check(self, vec_config):
+        from kag.common.vectorize_model.vectorize_model_config_checker import (
+            VectorizeModelConfigChecker,
+        )
+
+        return VectorizeModelConfigChecker().check(vec_config)
+
+    def run_solver(
+        self,
+        project_id,
+        task_id,
+        query,
+        func_name="invoke",
+        is_report=True,
+        host_addr="http://127.0.0.1:8887",
+    ):
+        from kag.solver.main_solver import SolverMain
+
+        return getattr(SolverMain(), func_name)(
+            project_id=project_id,
+            task_id=task_id,
+            query=query,
+            is_report=is_report,
+            host_addr=host_addr,
+        )
diff --git a/kag/interface/common/llm_client.py b/kag/interface/common/llm_client.py
index e6816896..f9571a71 100644
--- a/kag/interface/common/llm_client.py
+++ b/kag/interface/common/llm_client.py
@@ -77,7 +77,7 @@ def invoke(
         variables: Dict[str, Any],
         prompt_op: PromptABC,
         with_json_parse: bool = True,
-        with_except: bool = True,
+        with_except: bool = False,
     ):
         """
         Call the model and process the result.
@@ -109,10 +109,10 @@ def invoke(
         except Exception as e:
             import traceback
 
-            logger.error(f"Error {e} during invocation: {traceback.format_exc()}")
+            logger.debug(f"Error {e} during invocation: {traceback.format_exc()}")
             if with_except:
                 raise RuntimeError(
-                    f"LLM invoke exception, info: {e}\nllm input: {input}\nllm output: {response}"
+                    f"LLM invoke exception, info: {e}\nllm input: \n{prompt}\nllm output: \n{response}"
                 )
         return result
 
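
The two config-check entry points delegate to the existing checkers so the
SPG server can validate a model config before persisting it. A minimal sketch
of calling them from Python, assuming the checkers accept a JSON string; the
payload schema and field names below are illustrative, not shown in this
patch:

    import json

    from kag.bridge.spg_server_bridge import SPGServerBridge, init_kag_config

    # Bind this process to an SPG server project first; init_kag_config sets
    # the env vars that init_env() reads.
    init_kag_config(project_id="2", host_addr="http://127.0.0.1:8887")

    bridge = SPGServerBridge()

    # Hypothetical payloads; a bad config is expected to surface as an error
    # from the underlying checker.
    llm_config = json.dumps({"type": "maas", "model": "some-model", "api_key": "sk-xxx"})
    vec_config = json.dumps({"type": "openai", "model": "some-embedding-model"})
    print(bridge.run_llm_config_check(llm_config))
    print(bridge.run_vectorizer_config_check(vec_config))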
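run_solver dispatches by name via getattr(SolverMain(), func_name), so the
default func_name="invoke" resolves to SolverMain().invoke(...). A sketch of
a server-style call, assuming integer ids and that is_report=True pushes
intermediate progress back to host_addr; neither detail is pinned down by the
patch itself:

    # Continuing from the bridge above: run a solver task against a local
    # server and collect the final answer.
    answer = bridge.run_solver(
        project_id=2,
        task_id=1001,
        query="Which company maintains OpenSPG?",
        func_name="invoke",  # any SolverMain method name can be dispatched
        is_report=True,      # presumably reports progress to the server
        host_addr="http://127.0.0.1:8887",
    )
    print(answer)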
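The llm_client.py change flips LLMClient.invoke to a soft-failure default:
errors are now logged at debug level and swallowed unless the caller passes
with_except=True, and the raised message embeds the rendered prompt where the
old f-string interpolated Python's built-in input function. A sketch of both
calling styles; client, prompt_op, and variables stand in for a configured
LLMClient, a PromptABC instance, and its template variables:

    from typing import Any, Dict

    def soft_generate(client, prompt_op, variables: Dict[str, Any]):
        # Post-patch default (with_except=False): a failed call logs at debug
        # level and falls through, so guard against a possibly empty result.
        result = client.invoke(variables, prompt_op, with_json_parse=True)
        return result if result else None

    def strict_generate(client, prompt_op, variables: Dict[str, Any]):
        # Opt back in to the pre-patch behavior: any invocation error raises
        # a RuntimeError whose message now includes the rendered prompt.
        return client.invoke(variables, prompt_op, with_except=True)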