Remove model type and refactor code for Yorkie Intelligence (#374)
* Update backend.env

- change default LLM model to llama3.2:1b
- fix typo (lamma3.1 → llama3.1)

* remove LLM model type and hard-coded model list

* refactor code

- change process.env.YORKIE_INTELLIGENCE to configService.get("YORKIE_INTELLIGENCE")

* fix: replace process.env with configService, add throw Error in langchain module
sihyeong671 authored Oct 19, 2024
1 parent bc5c353 commit b7c7a5c
Showing 2 changed files with 15 additions and 34 deletions.
4 changes: 2 additions & 2 deletions backend/.env.development
@@ -42,9 +42,9 @@ YORKIE_PROJECT_NAME=default
 YORKIE_PROJECT_SECRET_KEY=""
 
 # YORKIE_INTELLIGENCE: Whether to enable Yorkie Intelligence for collaborative editing.
-# Available options: false, ollama:lamma3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini
+# Available options: false, ollama:llama3.1, ollama:gemma2, ollama:gemma2:2b, ollama:phi3, ollama:mistral, ollama:neural-chat, ollama:starling-lm, ollama:solar, openai:gpt-3.5-turbo, openai:gpt-4o-mini, etc.
 # If set to openai:gpt-3.5-turbo or openai:gpt-4o-mini, OPENAI_API_KEY is required.
-YORKIE_INTELLIGENCE="ollama:gemma2:2b"
+YORKIE_INTELLIGENCE="ollama:llama3.2:1b"
 
 # OLLAMA_HOST_URL: yorkie-intelligence ollama url
 OLLAMA_HOST_URL=http://localhost:11434
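Note: the new default keeps the provider:model format that the backend parses. Below is a minimal sketch of reading this setting through @nestjs/config, as the refactored module now does; the helper name resolveIntelligenceSetting is a hypothetical illustration, not part of this commit.

import { ConfigService } from "@nestjs/config";

// Hypothetical helper: resolves YORKIE_INTELLIGENCE via ConfigService,
// returning null when the feature is disabled ("false" or unset).
function resolveIntelligenceSetting(configService: ConfigService): string | null {
  const value = configService.get<string>("YORKIE_INTELLIGENCE");
  if (!value || value === "false") return null;
  return value; // e.g. "ollama:llama3.2:1b" or "openai:gpt-4o-mini"
}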
45 changes: 13 additions & 32 deletions backend/src/langchain/langchain.module.ts
@@ -2,46 +2,26 @@ import { Module } from "@nestjs/common";
 import { ChatOpenAI } from "@langchain/openai";
 import { ChatOllama } from "@langchain/ollama";
 import { BaseChatModel } from "@langchain/core/language_models/chat_models";
-
-type ModelList = {
-  [key: string]: string[];
-};
-
-const modelList: ModelList = {
-  ollama: [
-    "lamma3.1",
-    "gemma2",
-    "gemma2:2b",
-    "phi3",
-    "mistral",
-    "neural-chat",
-    "starling-lm",
-    "solar",
-  ],
-  openai: ["gpt-3.5-turbo", "gpt-4o-mini"],
-};
+import { ConfigService } from "@nestjs/config";
 
 const chatModelFactory = {
   provide: "ChatModel",
-  useFactory: () => {
-    const modelType = process.env.YORKIE_INTELLIGENCE;
+  useFactory: (configService: ConfigService) => {
+    const modelType = configService.get("YORKIE_INTELLIGENCE");
     try {
       // Split the modelType string into provider and model
       // ex) "ollama:gemma2:2b" => ["ollama", "gemma2:2b"]
       const [provider, model] = modelType.split(/:(.+)/);
       let chatModel: BaseChatModel | ChatOllama;
 
-      if (modelList[provider]?.includes(model)) {
-        if (provider === "ollama") {
-          chatModel = new ChatOllama({
-            model: model,
-            baseUrl: process.env.OLLAMA_HOST_URL,
-            checkOrPullModel: true,
-            streaming: true,
-          });
-        } else if (provider === "openai") {
-          chatModel = new ChatOpenAI({ modelName: model });
-        }
+      if (provider === "ollama") {
+        chatModel = new ChatOllama({
+          model: model,
+          baseUrl: configService.get("OLLAMA_HOST_URL"),
+          checkOrPullModel: true,
+          streaming: true,
+        });
+      } else if (provider === "openai") {
+        chatModel = new ChatOpenAI({ modelName: model });
       }
 
       if (!chatModel) throw new Error();
@@ -51,6 +31,7 @@ const chatModelFactory = {
       throw new Error(`${modelType} is not found. Please check your model name`);
     }
   },
+  inject: [ConfigService],
 };
 
 @Module({
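For context, a hedged usage sketch of the "ChatModel" token that this factory provides; the IntelligenceService class and its complete method are illustrative assumptions, not code from this commit. The leading comment also shows why the split(/:(.+)/) regex keeps everything after the first colon in the model name.

import { Inject, Injectable } from "@nestjs/common";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";

// "ollama:gemma2:2b".split(/:(.+)/) yields ["ollama", "gemma2:2b", ""],
// so destructuring [provider, model] splits only on the first colon.

@Injectable()
export class IntelligenceService {
  // Receives whatever chat model the factory above constructed.
  constructor(@Inject("ChatModel") private readonly chatModel: BaseChatModel) {}

  async complete(prompt: string): Promise<string> {
    const response = await this.chatModel.invoke(prompt);
    return String(response.content);
  }
}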
