diff --git a/letta/schemas/providers.py b/letta/schemas/providers.py
index d17494480f..0699af982a 100644
--- a/letta/schemas/providers.py
+++ b/letta/schemas/providers.py
@@ -614,8 +614,13 @@ def list_llm_models(self) -> List[LLMConfig]:
             context_window_size = self.get_model_context_window(model_name)
             model_endpoint = get_azure_chat_completions_endpoint(self.base_url, model_name, self.api_version)
             configs.append(
-                LLMConfig(model=model_name, model_endpoint_type="azure", model_endpoint=model_endpoint, context_window=context_window_size),
-                handle=self.get_handle(model_name),
+                LLMConfig(
+                    model=model_name,
+                    model_endpoint_type="azure",
+                    model_endpoint=model_endpoint,
+                    context_window=context_window_size,
+                    handle=self.get_handle(model_name),
+                ),
             )
         return configs
diff --git a/letta/server/rest_api/routers/v1/llms.py b/letta/server/rest_api/routers/v1/llms.py
index 4536ae4981..2c05aa48cd 100644
--- a/letta/server/rest_api/routers/v1/llms.py
+++ b/letta/server/rest_api/routers/v1/llms.py
@@ -18,7 +18,7 @@ def list_llm_backends(
 ):
     models = server.list_llm_models()
-    print(models)
+    # print(models)
     return models
 
 
@@ -28,5 +28,5 @@ def list_embedding_backends(
 ):
     models = server.list_embedding_models()
-    print(models)
+    # print(models)
     return models