diff --git a/basilisk/provider_engine/gemini_engine.py b/basilisk/provider_engine/gemini_engine.py
index c280b32f..1c0a091e 100644
--- a/basilisk/provider_engine/gemini_engine.py
+++ b/basilisk/provider_engine/gemini_engine.py
@@ -45,7 +45,19 @@ def models(self) -> list[ProviderAIModel]:
 		"""
 		Get models
 		"""
+		# See
 		return [
+			ProviderAIModel(
+				id="gemini-2.0-flash-exp",
+				# Translators: This is a model description
+				description=_(
+					"Next generation features, speed, and multimodal generation for a diverse variety of tasks"
+				),
+				context_window=1048576,
+				max_output_tokens=8192,
+				vision=True,
+				default_temperature=1.0,
+			),
 			ProviderAIModel(
 				id="gemini-1.5-flash-latest",
 				# Translators: This is a model description
@@ -90,37 +102,6 @@ def models(self) -> list[ProviderAIModel]:
 				vision=True,
 				default_temperature=1.0,
 			),
-			ProviderAIModel(
-				id="gemini-1.0-pro-latest",
-				# Translators: This is a model description
-				description=_(
-					"The best model for scaling across a wide range of tasks. This is the latest model."
-				),
-				context_window=30720,
-				max_output_tokens=2048,
-				default_temperature=0.9,
-			),
-			ProviderAIModel(
-				id="gemini-1.0-pro",
-				# Translators: This is a model description
-				description=_(
-					"The best model for scaling across a wide range of tasks"
-				),
-				context_window=30720,
-				max_output_tokens=2048,
-				default_temperature=0.9,
-			),
-			ProviderAIModel(
-				id="gemini-1.0-pro-vision-latest",
-				# Translators: This is a model description
-				description=_(
-					'The best image understanding model to handle a broad range of applications'
-				),
-				context_window=12288,
-				max_output_tokens=4096,
-				vision=True,
-				default_temperature=0.4,
-			),
 		]
 
 	def convert_role(self, role: MessageRoleEnum) -> str:
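
For context, here is a minimal standalone sketch (not part of the patch) of how a caller might select the newly added gemini-2.0-flash-exp entry out of the list that models() returns. The ProviderAIModel below is an assumed stand-in dataclass so the snippet runs on its own; the real class in basilisk may carry additional fields (such as description). The field values are copied from the patch above.

from dataclasses import dataclass


# Stand-in for basilisk's ProviderAIModel, reduced to the fields
# used in the patch; the real class may differ.
@dataclass
class ProviderAIModel:
	id: str
	context_window: int
	max_output_tokens: int
	vision: bool = False
	default_temperature: float = 1.0


# Mirrors the entry the patch adds to models().
models = [
	ProviderAIModel(
		id="gemini-2.0-flash-exp",
		context_window=1048576,
		max_output_tokens=8192,
		vision=True,
		default_temperature=1.0,
	),
]

# Look up a model by id, as a UI layer might when the user picks one.
flash = next((m for m in models if m.id == "gemini-2.0-flash-exp"), None)
assert flash is not None and flash.vision
print(f"{flash.id}: context={flash.context_window}, max_out={flash.max_output_tokens}")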