Commit
JBedoya-Barbosa committed Nov 29, 2023
1 parent 28442f4 commit 7dd328a
Showing 2 changed files with 9 additions and 2 deletions.
8 changes: 7 additions & 1 deletion backend/src/controllers/chat.js
@@ -191,7 +191,13 @@ async function createPrompt(req, res, next) {
     throw new ClientError(403, 'The prompt exceeds the number of tokens you have assigned');
   }
 
-  const completion = await generateChatCompletion(message, { model: llm.model });
+  let completion;
+
+  try {
+    completion = await generateChatCompletion(message, { model: llm.model, max_tokens: tokensToSpend });
+  } catch (err) {
+    throw new ClientError(403, 'The prompt exceeds the number of tokens you have assigned');
+  }
 
   const promptTokens = completion.usage.prompt_tokens;

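Note on this hunk: the single completion call is wrapped in a try/catch so a failed OpenAI request surfaces as a 403 ClientError instead of an unhandled rejection, and the caller's remaining token budget (tokensToSpend, presumably computed earlier in createPrompt) is forwarded as max_tokens. A minimal standalone sketch of the same pattern, where completeWithinBudget and remainingTokens are illustration names and generateChatCompletion / ClientError stand in for the project's own helpers:

async function completeWithinBudget(message, model, remainingTokens) {
  let completion;
  try {
    // Cap the response at the tokens the user still has available.
    completion = await generateChatCompletion(message, { model, max_tokens: remainingTokens });
  } catch (err) {
    // Any failure from the completion call is reported to the client as a quota error.
    throw new ClientError(403, 'The prompt exceeds the number of tokens you have assigned');
  }
  return completion;
}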
3 changes: 2 additions & 1 deletion backend/src/utils/generateChatCompletion.js
@@ -10,14 +10,15 @@ if (!OPENAI_API_KEY) {
 const openai = new OpenAI();
 
 async function generateChatCompletion(content, options) {
-  const { role = 'user', model } = options || {};
+  const { role = 'user', model, max_tokens } = options || {};
 
   if (!model) {
     throw new Error('The "model" is required');
   }
 
   try {
     const completion = await openai.chat.completions.create({
+      max_tokens: max_tokens,
       messages: [{ role, content }],
       model
     });
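Note on this hunk: max_tokens is destructured from options and forwarded to openai.chat.completions.create, which accepts it as the per-request completion cap; callers that omit it should be unaffected, since an undefined property is dropped when the request body is serialized. A minimal usage sketch of the updated helper (the import style, model name, and prompt are assumptions, not taken from the commit):

const generateChatCompletion = require('./generateChatCompletion'); // export/import style assumed

async function example() {
  const completion = await generateChatCompletion('Summarize this text for me.', {
    model: 'gpt-3.5-turbo', // assumed model name
    max_tokens: 150         // forwarded as-is to openai.chat.completions.create
  });
  // The helper returns the full completion object, as the controller above reads usage from it.
  console.log(completion.usage.prompt_tokens, completion.usage.completion_tokens);
}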
