Skip to content

Commit

Permalink
added gpt-4o-mini model
Browse files Browse the repository at this point in the history
  • Loading branch information
giosilvi committed Aug 9, 2024
1 parent 7f531dd commit a5163f8
Show file tree
Hide file tree
Showing 5 changed files with 26 additions and 48 deletions.
39 changes: 5 additions & 34 deletions src/gpt3.js
Original file line number Diff line number Diff line change
// Models that are served through the chat-completions API
// (as opposed to the legacy text-completions API).
export const CHAT_API_MODELS = {
  "gpt-4": true,
  "gpt-3.5-turbo": true,
  "gpt-4-turbo": true,
  "gpt-4o": true,
  "gpt-4o-mini": true,
};

// Models that accept image input alongside text.
export const VISION_SUPPORTED_MODELS = {
  "gpt-4-turbo": true,
  "gpt-4o": true,
  "gpt-4o-mini": true,
};

// For models that have a maximum token limit (input + output tokens per request).
// Values are the total context-window budget in tokens for each model.
const MaxTokensPerModel = {
  "gpt-4": 8000,
  "gpt-3.5-turbo": 4000,
  "gpt-3.5-turbo-instruct": 4000,
  "text-davinci-003": 4000,
  "text-davinci-002": 4000,
  "text-curie-001": 2000,
  "text-babbage-001": 2000,
  "text-ada-001": 2000,
};

// Note: This is the number of maximum output tokens (not the context window size).
const MaxOutputTokensPerModel = Object.fromEntries([
  ["gpt-4o", 4000],
  ["gpt-4-turbo", 4096],
]);

// Maximum number of input (prompt) tokens for the same models.
const MaxInputTokensPerModel = Object.fromEntries([
  ["gpt-4o", 4000],
  ["gpt-4-turbo", 4096],
]);

// Models whose input and output token budgets are tracked independently
// rather than sharing one combined per-request limit.
const DECOUPLED_INPUT_OUTPUT_LENGTH_MODELS = Object.fromEntries(
  ["gpt-4-turbo", "gpt-4o"].map((model) => [model, true])
);

function checkMaxTokens(content, model) {
var tokens = 0;
Expand Down Expand Up @@ -77,10 +51,7 @@ function checkMaxTokens(content, model) {
} else {
tokens = countTokens(content, model);
}
var maxTokens = MaxTokensPerModel[model] - tokens;
if (model in DECOUPLED_INPUT_OUTPUT_LENGTH_MODELS) {
maxTokens = MaxTokensPerModel[model];
}
const maxTokens = 4096 // True for most models
return { maxTokens, tokens };
}

Expand Down
4 changes: 2 additions & 2 deletions src/manifest.json
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
{
"name": "GPT-Prompter",
"version": "0.0.4.5",
"description": "Fast custom prompts to GPT-3.5, GPT-4 and ChatGPT API",
"version": "0.0.4.6",
"description": "Fast custom prompts to GPT-3.5 and GPT-4 API",
"manifest_version": 3,
"icons": {
"16": "icons/iconA16.png",
Expand Down
8 changes: 7 additions & 1 deletion src/popup.html
Original file line number Diff line number Diff line change
Expand Up @@ -142,11 +142,18 @@ <h5>
</div>
<select class="custom-select" id="inputmodel">
<option
selected
value="gpt-4o"
title="GPT-4 Omni, the most intelligent model from OpenAI as of May 13th, 2024. ($0.015/ 1k tokens). May struggle with conceptually complex or novel tasks (more well-suited to low-level math or programming)."
>
🅾 gpt-4o
</option>
<option
value="gpt-4o-mini"
title="GPT-4o mini, affordable and intelligent small model for fast, lightweight tasks. GPT-4o mini is cheaper and more capable than GPT-3.5 Turbo."
>
🪶 gpt-4o-mini
</option>
<option
value="gpt-4-turbo"
title="GPT-4 Turbo, a speed and quality upgrade to GPT-4. ($0.03/ 1k tokens)"
Expand All @@ -160,7 +167,6 @@ <h5>
❹ gpt-4
</option>
<option
selected
value="gpt-3.5-turbo"
title="ChatGPT model, 1/10 cost of davinci ($0.002/ 1k tokens)"
>
Expand Down
20 changes: 10 additions & 10 deletions src/popup_world.js
Original file line number Diff line number Diff line change
Expand Up @@ -12,26 +12,20 @@ function symbolFromModel(model) {
}

// const highlightColor = "#d2f4d3";//"rgb(16, 163, 255)";
// Per-1k-token prices (USD) divided by 1000 to give cost per single token.
const Gpt4oMiniCost = 0.0006 / 1000;
const Gpt4oCost = 0.015 / 1000;
const Gpt4TurboCost = 0.03 / 1000;
const Gpt4Cost8kCompl = 0.06 / 1000;
const ChatGPTCost = 0.002 / 1000;

// Per-token cost by model id; models absent from this table cost 0.
const CostPerTokenByModel = {
  "gpt-3.5-turbo": ChatGPTCost,
  "gpt-4": Gpt4Cost8kCompl,
  "gpt-4-turbo": Gpt4TurboCost,
  "gpt-4o": Gpt4oCost,
  "gpt-4o-mini": Gpt4oMiniCost,
};

/**
 * Estimate the dollar cost of a request.
 * @param {number} tokens - number of tokens in the request
 * @param {string} model - model identifier (e.g. "gpt-4o")
 * @returns {string} cost in USD formatted to 5 decimal places;
 *   "0.00000" for models not present in the cost table.
 */
function computeCost(tokens, model) {
  // Unknown models fall back to a zero per-token cost, matching the
  // original behavior of leaving `cost` at 0 when no branch matched.
  const perToken = CostPerTokenByModel[model] || 0;
  return (tokens * perToken).toFixed(5);
}

Expand Down Expand Up @@ -1366,6 +1360,9 @@ class popUpClass extends HTMLElement {
element.bodyData.model = "gpt-4o";
symbolElement.innerHTML = models["gpt-4o"];
} else if (model == "gpt-4o") {
element.bodyData.model = "gpt-4o-mini";
symbolElement.innerHTML = models["gpt-4o-mini"];
} else if (model == "gpt-4o-mini") {
element.bodyData.model = "gpt-3.5-turbo";
symbolElement.innerHTML = models["gpt-3.5-turbo"];
} else if (model === "gpt-3.5-turbo") {
Expand Down Expand Up @@ -1403,7 +1400,10 @@ class popUpClass extends HTMLElement {
} else if (model === "gpt-4-turbo") {
element.bodyData.model = "gpt-4o";
symbolElement.innerHTML = models["gpt-4o"];
} else if (model === "gpt-4o") {
} else if (model == "gpt-4o") {
element.bodyData.model = "gpt-4o-mini";
symbolElement.innerHTML = models["gpt-4o-mini"];
} else if (model === "gpt-4o-mini") {
element.bodyData.model = "gpt-4";
symbolElement.innerHTML = models["gpt-4"];
} else {
Expand Down
3 changes: 2 additions & 1 deletion src/sharedfunctions.js
Original file line number Diff line number Diff line change
// Display symbol for each selectable model.
export const models = {
  "gpt-4-turbo": "🚀",
  "gpt-4": "❹",
  "gpt-3.5-turbo": "🅶",
  "gpt-4o": "🅾",
  "gpt-4o-mini": "🪶",
};

//the above function symbolFromModel can be rewritten as a dictionary
Expand Down

0 comments on commit a5163f8

Please sign in to comment.