Skip to content

Commit

Permalink
feat(bridge): spg server bridge supports config check and run solver (#…
Browse files Browse the repository at this point in the history
…287)

* x

* x (#280)

* bridge add solver

* x

* feat(bridge): spg server bridge (#283)

* x

* bridge add solver

* x

* add invoke

* llm client catch error
  • Loading branch information
zhuzhongshu123 authored Jan 17, 2025
1 parent ca31351 commit deae277
Show file tree
Hide file tree
Showing 2 changed files with 34 additions and 4 deletions.
32 changes: 31 additions & 1 deletion kag/bridge/spg_server_bridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@


def init_kag_config(project_id: str, host_addr: str):
    """Point the KAG runtime at a project and server, then initialize it.

    The project id and host address are published through process
    environment variables (picked up downstream by ``init_env``) before
    the shared environment initialization runs.

    Args:
        project_id: Identifier of the KAG project to operate on.
        host_addr: Base address of the SPG server.
    """
    os.environ.update(
        {
            KAGConstants.ENV_KAG_PROJECT_ID: project_id,
            KAGConstants.ENV_KAG_PROJECT_HOST_ADDR: host_addr,
        }
    )
    init_env()
Expand Down Expand Up @@ -47,3 +46,34 @@ def run_component(self, component_name, component_config, input_data):
if hasattr(instance.input_types, "from_dict"):
input_data = instance.input_types.from_dict(input_data)
return [x.to_dict() for x in instance.invoke(input_data, write_ckpt=False)]

def run_llm_config_check(self, llm_config):
from kag.common.llm.llm_config_checker import LLMConfigChecker

return LLMConfigChecker().check(llm_config)

def run_vectorizer_config_check(self, vec_config):
from kag.common.vectorize_model.vectorize_model_config_checker import (
VectorizeModelConfigChecker,
)

return VectorizeModelConfigChecker().check(vec_config)

def run_solver(
self,
project_id,
task_id,
query,
func_name="invoke",
is_report=True,
host_addr="http://127.0.0.1:8887",
):
from kag.solver.main_solver import SolverMain

return getattr(SolverMain(), func_name)(
project_id=project_id,
task_id=task_id,
query=query,
is_report=is_report,
host_addr=host_addr,
)
6 changes: 3 additions & 3 deletions kag/interface/common/llm_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -77,7 +77,7 @@ def invoke(
variables: Dict[str, Any],
prompt_op: PromptABC,
with_json_parse: bool = True,
with_except: bool = True,
with_except: bool = False,
):
"""
Call the model and process the result.
Expand Down Expand Up @@ -109,10 +109,10 @@ def invoke(
except Exception as e:
import traceback

logger.error(f"Error {e} during invocation: {traceback.format_exc()}")
logger.debug(f"Error {e} during invocation: {traceback.format_exc()}")
if with_except:
raise RuntimeError(
f"LLM invoke exception, info: {e}\nllm input: {input}\nllm output: {response}"
f"LLM invoke exception, info: {e}\nllm input: \n{prompt}\nllm output: \n{response}"
)
return result

Expand Down

0 comments on commit deae277

Please sign in to comment.