From 39a0724ef3bf4239296354d875c7bf80a77dfc4a Mon Sep 17 00:00:00 2001
From: Mohammad Saif Khan <63262604+saif78642@users.noreply.github.com>
Date: Tue, 28 Jan 2025 23:30:50 +0530
Subject: [PATCH] feat: add Gemini 2.0 Flash-thinking-exp-01-21 model with 65k
 token support (#1202)

Added the new gemini-2.0-flash-thinking-exp-01-21 model to the
GoogleProvider's static model configuration. This model supports a
significantly increased maxTokenAllowed limit of 65,536 tokens, enabling
it to handle much larger context windows than the existing Gemini
models, which are capped at 8,192 tokens. The model is labeled
"Gemini 2.0 Flash-thinking-exp-01-21" for clear identification in the
UI/dropdowns.
---
 app/lib/modules/llm/providers/google.ts | 1 +
 1 file changed, 1 insertion(+)

diff --git a/app/lib/modules/llm/providers/google.ts b/app/lib/modules/llm/providers/google.ts
index edc8e1dd39..b69356c825 100644
--- a/app/lib/modules/llm/providers/google.ts
+++ b/app/lib/modules/llm/providers/google.ts
@@ -14,6 +14,7 @@ export default class GoogleProvider extends BaseProvider {
   staticModels: ModelInfo[] = [
     { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
+    { name: 'gemini-2.0-flash-thinking-exp-01-21', label: 'Gemini 2.0 Flash-thinking-exp-01-21', provider: 'Google', maxTokenAllowed: 65536 },
     { name: 'gemini-2.0-flash-exp', label: 'Gemini 2.0 Flash', provider: 'Google', maxTokenAllowed: 8192 },
     { name: 'gemini-1.5-flash-002', label: 'Gemini 1.5 Flash-002', provider: 'Google', maxTokenAllowed: 8192 },
     { name: 'gemini-1.5-flash-8b', label: 'Gemini 1.5 Flash-8b', provider: 'Google', maxTokenAllowed: 8192 },
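
Reviewer note (not part of the patch): below is a minimal, self-contained TypeScript sketch that mirrors the shape of the staticModels entries shown in the diff, illustrating how the new entry's larger token limit could be looked up. The local ModelInfo interface and the maxTokensFor helper are illustrative assumptions for this sketch, not the repository's actual exports.

// Sketch only: a local stand-in mirroring the fields used by the staticModels
// entries in the diff (the real ModelInfo type lives in the repository).
interface ModelInfo {
  name: string;
  label: string;
  provider: string;
  maxTokenAllowed: number;
}

// A small sample of the Google provider's static models, copied from the diff.
const staticModels: ModelInfo[] = [
  { name: 'gemini-1.5-flash-latest', label: 'Gemini 1.5 Flash', provider: 'Google', maxTokenAllowed: 8192 },
  { name: 'gemini-2.0-flash-thinking-exp-01-21', label: 'Gemini 2.0 Flash-thinking-exp-01-21', provider: 'Google', maxTokenAllowed: 65536 },
];

// Hypothetical helper: find a model by name and fall back to 8192, the limit
// used by the other static Gemini entries, when the name is unknown.
function maxTokensFor(models: ModelInfo[], name: string, fallback = 8192): number {
  return models.find((m) => m.name === name)?.maxTokenAllowed ?? fallback;
}

console.log(maxTokensFor(staticModels, 'gemini-2.0-flash-thinking-exp-01-21')); // 65536
console.log(maxTokensFor(staticModels, 'gemini-1.5-flash-latest')); // 8192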