diff --git a/CHANGELOG.md b/CHANGELOG.md index 7ce529cf33ec..3d9bdcc5fa34 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,240 @@ # Changelog +### [Version 1.19.31](https://github.com/lobehub/lobe-chat/compare/v1.19.30...v1.19.31) + +Released on **2024-09-24** + +#### 💄 Styles + +- **misc**: Add google gemini 1.5 002 series. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Add google gemini 1.5 002 series, closes [#4118](https://github.com/lobehub/lobe-chat/issues/4118) ([10145fa](https://github.com/lobehub/lobe-chat/commit/10145fa)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.30](https://github.com/lobehub/lobe-chat/compare/v1.19.29...v1.19.30) + +Released on **2024-09-24** + +#### 💄 Styles + +- **misc**: Disable taichu2.0 functioncall & default disable taichu2.0v model. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Disable taichu2.0 functioncall & default disable taichu2.0v model, closes [#4117](https://github.com/lobehub/lobe-chat/issues/4117) ([00da53b](https://github.com/lobehub/lobe-chat/commit/00da53b)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.29](https://github.com/lobehub/lobe-chat/compare/v1.19.28...v1.19.29) + +Released on **2024-09-24** + +#### 💄 Styles + +- **misc**: Update taichu provider info & add taichu vision model. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Update taichu provider info & add taichu vision model, closes [#4114](https://github.com/lobehub/lobe-chat/issues/4114) ([e5331db](https://github.com/lobehub/lobe-chat/commit/e5331db)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.28](https://github.com/lobehub/lobe-chat/compare/v1.19.27...v1.19.28) + +Released on **2024-09-24** + +#### 💄 Styles + +- **misc**: Add function call support for Stepfun. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Add function call support for Stepfun, closes [#4101](https://github.com/lobehub/lobe-chat/issues/4101) ([8d7d96e](https://github.com/lobehub/lobe-chat/commit/8d7d96e)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.27](https://github.com/lobehub/lobe-chat/compare/v1.19.26...v1.19.27) + +Released on **2024-09-24** + +#### 💄 Styles + +- **misc**: Improve images display in chat messages. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Improve images display in chat messages, closes [#3475](https://github.com/lobehub/lobe-chat/issues/3475) ([c54c7ed](https://github.com/lobehub/lobe-chat/commit/c54c7ed)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.26](https://github.com/lobehub/lobe-chat/compare/v1.19.25...v1.19.26) + +Released on **2024-09-24** + +#### 🐛 Bug Fixes + +- **misc**: Fix url config import after user state init. + +#### 💄 Styles + +- **misc**: Add support function call for 360AI, left sidebar has only assistants. + +
+ +
+Improvements and Fixes + +#### What's fixed + +- **misc**: Fix url config import after user state init, closes [#4072](https://github.com/lobehub/lobe-chat/issues/4072) ([18a240c](https://github.com/lobehub/lobe-chat/commit/18a240c)) + +#### Styles + +- **misc**: Add support function call for 360AI, closes [#4099](https://github.com/lobehub/lobe-chat/issues/4099) ([536696b](https://github.com/lobehub/lobe-chat/commit/536696b)) +- **misc**: Left sidebar has only assistants, closes [#4108](https://github.com/lobehub/lobe-chat/issues/4108) ([db1f81c](https://github.com/lobehub/lobe-chat/commit/db1f81c)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.25](https://github.com/lobehub/lobe-chat/compare/v1.19.24...v1.19.25) + +Released on **2024-09-24** + +#### 🐛 Bug Fixes + +- **misc**: Add missing translations. + +
+ +
+Improvements and Fixes + +#### What's fixed + +- **misc**: Add missing translations, closes [#4106](https://github.com/lobehub/lobe-chat/issues/4106) ([c24bf45](https://github.com/lobehub/lobe-chat/commit/c24bf45)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.24](https://github.com/lobehub/lobe-chat/compare/v1.19.23...v1.19.24) + +Released on **2024-09-23** + +#### 🐛 Bug Fixes + +- **misc**: Fix artifacts code language highlight. + +
+ +
+Improvements and Fixes + +#### What's fixed + +- **misc**: Fix artifacts code language highlight, closes [#4096](https://github.com/lobehub/lobe-chat/issues/4096) ([2d956a3](https://github.com/lobehub/lobe-chat/commit/2d956a3)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ +### [Version 1.19.23](https://github.com/lobehub/lobe-chat/compare/v1.19.22...v1.19.23) + +Released on **2024-09-23** + +#### 💄 Styles + +- **misc**: Add spark max-32k model. + +
+ +
+Improvements and Fixes + +#### Styles + +- **misc**: Add spark max-32k model, closes [#4071](https://github.com/lobehub/lobe-chat/issues/4071) ([c11b57b](https://github.com/lobehub/lobe-chat/commit/c11b57b)) + +
+ +
+ +[![](https://img.shields.io/badge/-BACK_TO_TOP-151515?style=flat-square)](#readme-top) + +
+ ### [Version 1.19.22](https://github.com/lobehub/lobe-chat/compare/v1.19.21...v1.19.22) Released on **2024-09-22** diff --git a/README.md b/README.md index 9753dbda7f81..62567be844b8 100644 --- a/README.md +++ b/README.md @@ -50,9 +50,7 @@ One-click **FREE** deployment of your private OpenAI ChatGPT/Claude/Gemini/Groq/ #### TOC -- [Lobe Chat](#lobe-chat) - - [TOC](#toc) - - [](#) +- [Lobe Chat](#lobe-chat) - [TOC](#toc) - [](#) - [👋🏻 Getting Started \& Join Our Community](#-getting-started--join-our-community) - [✨ Features](#-features) - [`1` File Upload/Knowledge Base](#1-file-uploadknowledge-base) @@ -250,7 +248,7 @@ In addition, these plugins are not limited to news aggregation, but can also ext > > Learn more about [📘 Plugin Usage][docs-usage-plugin] by checking it out. - + | Recent Submits | Description | | ---------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------- | @@ -289,16 +287,16 @@ Our marketplace is not just a showcase platform but also a collaborative space. > We welcome all users to join this growing ecosystem and participate in the iteration and optimization of agents. > Together, we can create more interesting, practical, and innovative agents, further enriching the diversity and practicality of the agent offerings. - + -| Recent Submits | Description | -| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| [Exam Essay Writing Master](https://chat-preview.lobehub.com/market?agent=exam-composition-writing)
By **[NriotHrreion](https://github.com/NriotHrreion)** on **2024-09-23** | A master of writing exam essays who excels in crafting high-scoring compositions.
`education` `essay` `writing` | -| [Markdown Conversion Expert](https://chat-preview.lobehub.com/market?agent=markdown%E6%8E%92%E7%89%88%E5%A4%A7%E5%B8%88)
By **[cl1107](https://github.com/cl1107)** on **2024-09-23** | Proficient in using Markdown syntax for text structuring and emphasis
`text-structure` `markdown-syntax` `headings` `lists` `bold` `quote` `agulu` | -| [Nutrition Analyzer](https://chat-preview.lobehub.com/market?agent=nutrition-analyzer)
By **[Pandurangmopgar](https://github.com/Pandurangmopgar)** on **2024-09-23** | Nutri Info is an AI-powered nutrition assistant that analyzes food images and nutrition labels, providing simple explanations of nutritional content, benefits, and potential downsides. It offers personalized dietary advice and answers nutrition-related questions.
`nutrition` `ai` `health` `food-analysis` `meal-planning` | -| [Database Naming Assistant](https://chat-preview.lobehub.com/market?agent=database-name-helper)
By **[ppzhuya](https://github.com/ppzhuya)** on **2024-09-20** | Input a Chinese word, and I will provide five professional English terms for database design fields.
`database` `naming` `translation` `development` `programming` | +| Recent Submits | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| [Contract Clause Refiner v1.0](https://chat-preview.lobehub.com/market?agent=business-contract)
By **[houhoufm](https://github.com/houhoufm)** on **2024-09-24** | Output: {Optimize contract clauses for professional and concise expression}
`contract-optimization` `legal-consultation` `copywriting` `terminology` `project-management` | +| [Meeting Assistant v1.0](https://chat-preview.lobehub.com/market?agent=meeting)
By **[houhoufm](https://github.com/houhoufm)** on **2024-09-24** | Professional meeting report assistant, distilling meeting key points into report sentences
`meeting-reports` `writing` `communication` `workflow` `professional-skills` | +| [Stable Album Cover Prompter](https://chat-preview.lobehub.com/market?agent=title-bpm-stimmung)
By **[MellowTrixX](https://github.com/MellowTrixX)** on **2024-09-24** | Professional graphic designer for front cover design specializing in creating visual concepts and designs for melodic techno music albums.
`album-cover` `prompt` `stable-diffusion` `cover-design` `cover-prompts` | +| [Advertising Copywriting Master](https://chat-preview.lobehub.com/market?agent=advertising-copywriting-master)
By **[leter](https://github.com/leter)** on **2024-09-23** | Specializing in product function analysis and advertising copywriting that resonates with user values
`advertising-copy` `user-values` `marketing-strategy` | -> 📊 Total agents: [**355** ](https://github.com/lobehub/lobe-chat-agents) +> 📊 Total agents: [**392** ](https://github.com/lobehub/lobe-chat-agents) diff --git a/README.zh-CN.md b/README.zh-CN.md index 20259c52c25e..03c043e641dd 100644 --- a/README.zh-CN.md +++ b/README.zh-CN.md @@ -241,14 +241,14 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地 > 通过文档了解更多 [📘 插件使用][docs-usage-plugin] - + -| 最近新增 | 插件描述 | -| ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | -| [通义万象图像生成器](https://chat-preview.lobehub.com/settings/agent)
By **YoungTx** on **2024-08-09** | 此插件使用阿里巴巴的通义万象模型根据文本提示生成图像。
`图像` `通义` `万象` | -| [购物工具](https://chat-preview.lobehub.com/settings/agent)
By **shoppingtools** on **2024-07-19** | 在 eBay 和 AliExpress 上搜索产品,查找 eBay 活动和优惠券。获取快速示例。
`购物` `e-bay` `ali-express` `优惠券` | -| [Savvy Trader AI](https://chat-preview.lobehub.com/settings/agent)
By **savvytrader** on **2024-06-27** | 实时股票、加密货币和其他投资数据。
`股票` `分析` | -| [Search1API](https://chat-preview.lobehub.com/settings/agent)
By **fatwang2** on **2024-05-06** | 搜索聚合服务,专为LLMs设计
`web` `search` | +| 最近新增 | 插件描述 | +| ---------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------ | +| [通义万象图像生成器](https://chat-preview.lobehub.com/settings/agent)
By **YoungTx** on **2024-08-09** | 此插件使用阿里巴巴的通义万象模型根据文本提示生成图像。
`图像` `通义` `万象` | +| [购物工具](https://chat-preview.lobehub.com/settings/agent)
By **shoppingtools** on **2024-07-19** | 在 eBay 和 AliExpress 上搜索产品,查找 eBay 活动和优惠券。获取快速示例。
`购物` `e-bay` `ali-express` `优惠券` | +| [Savvy Trader AI](https://chat-preview.lobehub.com/settings/agent)
By **savvytrader** on **2024-06-27** | 实时股票、加密货币和其他投资数据。
`股票` `分析` | +| [Search1API](https://chat-preview.lobehub.com/settings/agent)
By **fatwang2** on **2024-05-06** | 搜索聚合服务,专为LLMs设计
`web` `search` | > 📊 Total plugins: [**50**](https://github.com/lobehub/lobe-chat-plugins) @@ -276,16 +276,16 @@ LobeChat 的插件生态系统是其核心功能的重要扩展,它极大地 > > 我欢迎所有用户加入这个不断成长的生态系统,共同参与到助手的迭代与优化中来。共同创造出更多有趣、实用且具有创新性的助手,进一步丰富助手的多样性和实用性。 - + -| 最近新增 | 助手说明 | -| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | -| [考场作文学霸](https://chat-preview.lobehub.com/market?agent=exam-composition-writing)
By **[NriotHrreion](https://github.com/NriotHrreion)** on **2024-09-23** | 一个擅长写考场作文的语文学霸
`教育` `作文` `写作` | -| [markdown 排版大师](https://chat-preview.lobehub.com/market?agent=markdown%E6%8E%92%E7%89%88%E5%A4%A7%E5%B8%88)
By **[cl1107](https://github.com/cl1107)** on **2024-09-23** | 擅长使用Markdown语法和 emoji 表情进行精美排版
`markdown` `写作` | -| [营养分析器](https://chat-preview.lobehub.com/market?agent=nutrition-analyzer)
By **[Pandurangmopgar](https://github.com/Pandurangmopgar)** on **2024-09-23** | Nutri Info 是一款由人工智能驱动的营养助手,可以分析食品图像和营养标签,提供营养成分、益处和潜在缺点的简单解释。它提供个性化的饮食建议,并回答与营养相关的问题。
`营养` `人工智能` `健康` `食品分析` `餐饮规划` | -| [数据库取名助手](https://chat-preview.lobehub.com/market?agent=database-name-helper)
By **[ppzhuya](https://github.com/ppzhuya)** on **2024-09-20** | 输入一个中文词汇,我会给出五个专业的数据库设计字段的英文
`数据库` `命名` `翻译` `开发` `编程` | +| 最近新增 | 助手说明 | +| -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------- | +| [合同条款精炼师 v1.0](https://chat-preview.lobehub.com/market?agent=business-contract)
By **[houhoufm](https://github.com/houhoufm)** on **2024-09-24** | 输出: {优化合同条款,专业简洁表达}
`合同优化` `法律咨询` `文案撰写` `专业术语` `项目管理` | +| [会议助手 v1.0](https://chat-preview.lobehub.com/market?agent=meeting)
By **[houhoufm](https://github.com/houhoufm)** on **2024-09-24** | 专业会议汇报助手,提炼会议要点成汇报句子
`会议汇报` `撰写` `沟通` `工作流程` `专业技能` | +| [稳定专辑封面提示生成器](https://chat-preview.lobehub.com/market?agent=title-bpm-stimmung)
By **[MellowTrixX](https://github.com/MellowTrixX)** on **2024-09-24** | 专业的平面设计师,专注于为旋律科技音乐专辑创建视觉概念和设计。
`专辑封面` `提示` `稳定扩散` `封面设计` `封面提示` | +| [广告文案创作大师](https://chat-preview.lobehub.com/market?agent=advertising-copywriting-master)
By **[leter](https://github.com/leter)** on **2024-09-23** | 擅长产品功能分析与用户价值观广告文案创作
`广告文案` `用户价值观` `营销策略` | -> 📊 Total agents: [**355** ](https://github.com/lobehub/lobe-chat-agents) +> 📊 Total agents: [**392** ](https://github.com/lobehub/lobe-chat-agents) diff --git a/docker-compose/local-logto/.env.example b/docker-compose/local-logto/.env.example new file mode 100644 index 000000000000..5fa0bb7b3bf8 --- /dev/null +++ b/docker-compose/local-logto/.env.example @@ -0,0 +1,33 @@ +# Logto secret +LOGTO_CLIENT_ID= +LOGTO_CLIENT_SECRET= + +# MinIO S3 configuration +MINIO_ROOT_USER=YOUR_MINIO_USER +MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD + +# Configure the bucket information of MinIO +MINIO_LOBE_BUCKET=lobe +S3_ACCESS_KEY_ID= +S3_SECRET_ACCESS_KEY= + +# Proxy, if you need it +# HTTP_PROXY=http://localhost:7890 +# HTTPS_PROXY=http://localhost:7890 + + +# Other environment variables, as needed. You can refer to the environment variables configuration for the client version, making sure not to have ACCESS_CODE. +# OPENAI_API_KEY=sk-xxxx +# OPENAI_PROXY_URL=https://api.openai.com/v1 +# OPENAI_MODEL_LIST=... + + +# ----- Other config ----- +# if no special requirements, no need to change +LOBE_PORT=3210 +LOGTO_PORT=3001 +MINIO_PORT=9000 + +# Postgres related, which are the necessary environment variables for DB +LOBE_DB_NAME=lobechat +POSTGRES_PASSWORD=uWNZugjBqixf8dxC diff --git a/docker-compose/local-logto/.env.zh-CN.example b/docker-compose/local-logto/.env.zh-CN.example new file mode 100644 index 000000000000..4557b2daa80a --- /dev/null +++ b/docker-compose/local-logto/.env.zh-CN.example @@ -0,0 +1,33 @@ +# Logto secret +LOGTO_CLIENT_ID= +LOGTO_CLIENT_SECRET= + +# MinIO S3 配置 +MINIO_ROOT_USER=YOUR_MINIO_USER +MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD + +# 在下方配置 minio 中添加的桶 +MINIO_LOBE_BUCKET=lobe +S3_ACCESS_KEY_ID= +S3_SECRET_ACCESS_KEY= + + +# Proxy,如果你需要的话(比如你使用 GitHub 作为鉴权服务提供商) +# HTTP_PROXY=http://localhost:7890 +# HTTPS_PROXY=http://localhost:7890 + +# 其他环境变量,视需求而定,可以参照客户端版本的环境变量配置,注意不要有 ACCESS_CODE +# OPENAI_API_KEY=sk-xxxx +# OPENAI_PROXY_URL=https://api.openai.com/v1 +# OPENAI_MODEL_LIST=... + + +# ----- 相关配置 start ----- +# 如没有特殊需要不用更改 +LOBE_PORT=3210 +LOGTO_PORT=3001 +MINIO_PORT=9000 + +# Postgres 相关,也即 DB 必须的环境变量 +LOBE_DB_NAME=lobechat +POSTGRES_PASSWORD=uWNZugjBqixf8dxC diff --git a/docker-compose/local-logto/docker-compose.yml b/docker-compose/local-logto/docker-compose.yml new file mode 100644 index 000000000000..cc59b6a2afa8 --- /dev/null +++ b/docker-compose/local-logto/docker-compose.yml @@ -0,0 +1,102 @@ +services: + network-service: + image: alpine + container_name: lobe-network + ports: + - '${MINIO_PORT}:${MINIO_PORT}' # MinIO API + - '9001:9001' # MinIO Console + - '${LOGTO_PORT}:${LOGTO_PORT}' # Logto + - '3002:3002' # Logto Admin + - '${LOBE_PORT}:3210' # LobeChat + command: tail -f /dev/null + networks: + - lobe-network + + postgresql: + image: pgvector/pgvector:pg16 + container_name: lobe-postgres + ports: + - "5432:5432" + volumes: + - './data:/var/lib/postgresql/data' + environment: + - 'POSTGRES_DB=${LOBE_DB_NAME}' + - 'POSTGRES_PASSWORD=${POSTGRES_PASSWORD}' + healthcheck: + test: ['CMD-SHELL', 'pg_isready -U postgres'] + interval: 5s + timeout: 5s + retries: 5 + restart: always + networks: + - lobe-network + + minio: + image: minio/minio + container_name: lobe-minio + network_mode: 'service:network-service' + volumes: + - './s3_data:/etc/minio/data' + environment: + - 'MINIO_ROOT_USER=${MINIO_ROOT_USER}' + - 'MINIO_ROOT_PASSWORD=${MINIO_ROOT_PASSWORD}' + - 'MINIO_API_CORS_ALLOW_ORIGIN=http://localhost:${LOBE_PORT}' + restart: always + command: > + server /etc/minio/data --address ":${MINIO_PORT}" --console-address ":9001" + + logto: + image: svhd/logto + container_name: lobe-logto + network_mode: 'service:network-service' + depends_on: + postgresql: + condition: service_healthy + environment: + - 'TRUST_PROXY_HEADER=1' + - 'PORT=${LOGTO_PORT}' + - 'DB_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/logto' + - 'ENDPOINT=http://localhost:${LOGTO_PORT}' + - 'ADMIN_ENDPOINT=http://localhost:3002' + entrypoint: ['sh', '-c', 'npm run cli db seed -- --swe && npm start'] + + + lobe: + image: lobehub/lobe-chat-database + container_name: lobe-database + network_mode: 'service:network-service' + depends_on: + postgresql: + condition: service_healthy + network-service: + condition: service_started + minio: + condition: service_started + logto: + condition: service_started + + environment: + - 'APP_URL=http://localhost:3210' + - 'NEXT_AUTH_SSO_PROVIDERS=logto' + - 'KEY_VAULTS_SECRET=Kix2wcUONd4CX51E/ZPAd36BqM4wzJgKjPtz2sGztqQ=' + - 'NEXT_AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg' + - 'NEXTAUTH_URL=http://localhost:${LOBE_PORT}/api/auth' + - 'LOGTO_ISSUER=http://localhost:${LOGTO_PORT}/oidc' + - 'DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/${LOBE_DB_NAME}' + - 'S3_ENDPOINT=http://localhost:${MINIO_PORT}' + - 'S3_BUCKET=${MINIO_LOBE_BUCKET}' + - 'S3_PUBLIC_DOMAIN=http://localhost:${MINIO_PORT}' + - 'S3_ENABLE_PATH_STYLE=1' + env_file: + - .env + restart: always + +volumes: + data: + driver: local + s3_data: + driver: local + +networks: + lobe-network: + driver: bridge diff --git a/docker-compose/local/.env.example b/docker-compose/local/.env.example index 5fa0bb7b3bf8..40ccc8e622a2 100644 --- a/docker-compose/local/.env.example +++ b/docker-compose/local/.env.example @@ -1,16 +1,3 @@ -# Logto secret -LOGTO_CLIENT_ID= -LOGTO_CLIENT_SECRET= - -# MinIO S3 configuration -MINIO_ROOT_USER=YOUR_MINIO_USER -MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD - -# Configure the bucket information of MinIO -MINIO_LOBE_BUCKET=lobe -S3_ACCESS_KEY_ID= -S3_SECRET_ACCESS_KEY= - # Proxy, if you need it # HTTP_PROXY=http://localhost:7890 # HTTPS_PROXY=http://localhost:7890 @@ -22,12 +9,27 @@ S3_SECRET_ACCESS_KEY= # OPENAI_MODEL_LIST=... -# ----- Other config ----- +# =========================== +# ====== Preset config ====== +# =========================== # if no special requirements, no need to change LOBE_PORT=3210 -LOGTO_PORT=3001 +CASDOOR_PORT=8000 MINIO_PORT=9000 # Postgres related, which are the necessary environment variables for DB LOBE_DB_NAME=lobechat POSTGRES_PASSWORD=uWNZugjBqixf8dxC + +# Casdoor secret +AUTH_CASDOOR_ID=a387a4892ee19b1a2249 +AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354 + +# MinIO S3 configuration +MINIO_ROOT_USER=YOUR_MINIO_USER +MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD + +# Configure the bucket information of MinIO +MINIO_LOBE_BUCKET=lobe +S3_ACCESS_KEY_ID=soaucnP8Bip0TDdUjxng +S3_SECRET_ACCESS_KEY=ZPUzvY34umfcfxvWKSv0P00vczVMB6YmgJS5J9eO \ No newline at end of file diff --git a/docker-compose/local/.env.zh-CN.example b/docker-compose/local/.env.zh-CN.example index 4557b2daa80a..f2e174e58499 100644 --- a/docker-compose/local/.env.zh-CN.example +++ b/docker-compose/local/.env.zh-CN.example @@ -1,33 +1,36 @@ -# Logto secret -LOGTO_CLIENT_ID= -LOGTO_CLIENT_SECRET= - -# MinIO S3 配置 -MINIO_ROOT_USER=YOUR_MINIO_USER -MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD - -# 在下方配置 minio 中添加的桶 -MINIO_LOBE_BUCKET=lobe -S3_ACCESS_KEY_ID= -S3_SECRET_ACCESS_KEY= - - # Proxy,如果你需要的话(比如你使用 GitHub 作为鉴权服务提供商) # HTTP_PROXY=http://localhost:7890 # HTTPS_PROXY=http://localhost:7890 + # 其他环境变量,视需求而定,可以参照客户端版本的环境变量配置,注意不要有 ACCESS_CODE # OPENAI_API_KEY=sk-xxxx # OPENAI_PROXY_URL=https://api.openai.com/v1 # OPENAI_MODEL_LIST=... -# ----- 相关配置 start ----- +# =================== +# ===== 预设配置 ===== +# =================== # 如没有特殊需要不用更改 LOBE_PORT=3210 -LOGTO_PORT=3001 +CASDOOR_PORT=8000 MINIO_PORT=9000 # Postgres 相关,也即 DB 必须的环境变量 LOBE_DB_NAME=lobechat POSTGRES_PASSWORD=uWNZugjBqixf8dxC + +# Casdoor secret +AUTH_CASDOOR_ID=a387a4892ee19b1a2249 +AUTH_CASDOOR_SECRET=dbf205949d704de81b0b5b3603174e23fbecc354 + +# MinIO S3 配置 +MINIO_ROOT_USER=YOUR_MINIO_USER +MINIO_ROOT_PASSWORD=YOUR_MINIO_PASSWORD + +# 在下方配置 minio 中添加的桶 +MINIO_LOBE_BUCKET=lobe +S3_ACCESS_KEY_ID=soaucnP8Bip0TDdUjxng +S3_SECRET_ACCESS_KEY=ZPUzvY34umfcfxvWKSv0P00vczVMB6YmgJS5J9eO + diff --git a/docker-compose/local/docker-compose.yml b/docker-compose/local/docker-compose.yml index cc59b6a2afa8..6d0a7821c88d 100644 --- a/docker-compose/local/docker-compose.yml +++ b/docker-compose/local/docker-compose.yml @@ -5,8 +5,7 @@ services: ports: - '${MINIO_PORT}:${MINIO_PORT}' # MinIO API - '9001:9001' # MinIO Console - - '${LOGTO_PORT}:${LOGTO_PORT}' # Logto - - '3002:3002' # Logto Admin + - '${CASDOOR_PORT}:${CASDOOR_PORT}' # Casdoor - '${LOBE_PORT}:3210' # LobeChat command: tail -f /dev/null networks: @@ -45,21 +44,22 @@ services: command: > server /etc/minio/data --address ":${MINIO_PORT}" --console-address ":9001" - logto: - image: svhd/logto - container_name: lobe-logto + casdoor: + image: casbin/casdoor + container_name: lobe-casdoor + entrypoint: /bin/sh -c './server --createDatabase=true' network_mode: 'service:network-service' depends_on: postgresql: condition: service_healthy environment: - - 'TRUST_PROXY_HEADER=1' - - 'PORT=${LOGTO_PORT}' - - 'DB_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/logto' - - 'ENDPOINT=http://localhost:${LOGTO_PORT}' - - 'ADMIN_ENDPOINT=http://localhost:3002' - entrypoint: ['sh', '-c', 'npm run cli db seed -- --swe && npm start'] - + RUNNING_IN_DOCKER: "true" + driverName: "postgres" + dataSourceName: "user=postgres password=${POSTGRES_PASSWORD} host=postgresql port=5432 sslmode=disable dbname=casdoor" + origin: "http://localhost:${CASDOOR_PORT}" + runmode: "dev" + volumes: + - ./init_data.json:/init_data.json lobe: image: lobehub/lobe-chat-database @@ -72,21 +72,22 @@ services: condition: service_started minio: condition: service_started - logto: + casdoor: condition: service_started environment: - 'APP_URL=http://localhost:3210' - - 'NEXT_AUTH_SSO_PROVIDERS=logto' + - 'NEXT_AUTH_SSO_PROVIDERS=casdoor' - 'KEY_VAULTS_SECRET=Kix2wcUONd4CX51E/ZPAd36BqM4wzJgKjPtz2sGztqQ=' - 'NEXT_AUTH_SECRET=NX2kaPE923dt6BL2U8e9oSre5RfoT7hg' - - 'NEXTAUTH_URL=http://localhost:${LOBE_PORT}/api/auth' - - 'LOGTO_ISSUER=http://localhost:${LOGTO_PORT}/oidc' + - 'AUTH_URL=http://localhost:${LOBE_PORT}/api/auth' + - 'AUTH_CASDOOR_ISSUER=http://localhost:${CASDOOR_PORT}' - 'DATABASE_URL=postgresql://postgres:${POSTGRES_PASSWORD}@postgresql:5432/${LOBE_DB_NAME}' - 'S3_ENDPOINT=http://localhost:${MINIO_PORT}' - 'S3_BUCKET=${MINIO_LOBE_BUCKET}' - 'S3_PUBLIC_DOMAIN=http://localhost:${MINIO_PORT}' - 'S3_ENABLE_PATH_STYLE=1' + - 'LLM_VISION_IMAGE_USE_BASE64=1' env_file: - .env restart: always diff --git a/docker-compose/local/init_data.json.tar.gz b/docker-compose/local/init_data.json.tar.gz new file mode 100644 index 000000000000..bb14d457a6e3 Binary files /dev/null and b/docker-compose/local/init_data.json.tar.gz differ diff --git a/docker-compose/local/s3_data.tar.gz b/docker-compose/local/s3_data.tar.gz new file mode 100644 index 000000000000..7174fb563f40 Binary files /dev/null and b/docker-compose/local/s3_data.tar.gz differ diff --git a/docker-compose/local/setup.sh b/docker-compose/local/setup.sh new file mode 100644 index 000000000000..c4bef99d53d0 --- /dev/null +++ b/docker-compose/local/setup.sh @@ -0,0 +1,242 @@ +#!/bin/bash + +# ================== +# == Env settings == +# ================== + +# ====================== +# == Process the args == +# ====================== + +# 1. Default values of arguments +# Arg: -f +# Determine force download asserts, default is not +FORCE_DOWNLOAD=false + +# Arg: -l or --lang +# Determine the language to show, default is en +LANGUAGE="en_US" + +# Arg: --url +# Determine the source URL to download files +SOURCE_URL="https://raw.githubusercontent.com/lobehub/lobe-chat/main" + +# 2. Parse script arguments +while getopts "fl:-:" opt; do + case $opt in + f) + FORCE_DOWNLOAD=true + ;; + l) + LANGUAGE=$OPTARG + ;; + -) + case "${OPTARG}" in + lang) + LANGUAGE="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) + ;; + url) + SOURCE_URL="${!OPTIND}"; OPTIND=$(( $OPTIND + 1 )) + ;; + *) + echo "Usage: $0 [-f] [-l language|--lang language] [--url source]" >&2 + exit 1 + ;; + esac + ;; + *) + echo "Usage: $0 [-f] [-l language|--lang language] [--url source]" >&2 + exit 1 + ;; + esac +done + + +# =============== +# == Variables == +# =============== +# File list +SUB_DIR="docker-compose/local" +FILES=( + "$SUB_DIR/docker-compose.yml" + "$SUB_DIR/.env.example" + "$SUB_DIR/init_data.json.tar.gz" + "$SUB_DIR/s3_data.tar.gz" +) + +# Supported languages and messages +# Arg: -l --lang +# If the language is not supported, default to English +# Function to show messages +show_message() { + local key="$1" + case $key in + downloading) + case $LANGUAGE in + zh_CN) + echo "正在下载文件..." + ;; + *) + echo "Downloading files..." + ;; + esac + ;; + downloaded) + case $LANGUAGE in + zh_CN) + echo " 已经存在,跳过下载。" + ;; + *) + echo " already exists, skipping download." + ;; + esac + ;; + extracted_success) + case $LANGUAGE in + zh_CN) + echo " 解压成功到目录:" + ;; + *) + echo " extracted successfully to directory: " + ;; + esac + ;; + extracted_failed) + case $LANGUAGE in + zh_CN) + echo " 解压失败。" + ;; + *) + echo " extraction failed." + ;; + esac + ;; + file_not_exists) + case $LANGUAGE in + zh_CN) + echo " 不存在。" + ;; + *) + echo " does not exist." + ;; + esac + ;; + tips_run_command) + case $LANGUAGE in + zh_CN) + echo "您已经完成了所有配置文件的下载。请运行以下命令启动LobeChat:" + ;; + *) + echo "You have completed downloading all configuration files. Please run this command to start LobeChat:" + ;; + esac + ;; + tips_show_documentation) + case $LANGUAGE in + zh_CN) + echo "完整的环境变量在'.env'中可以在文档中找到:" + ;; + *) + echo "Full environment variables in the '.env' can be found at the documentation on " + ;; + esac + ;; + tips_show_documentation_url) + case $LANGUAGE in + zh_CN) + echo "https://lobehub.com/zh/docs/self-hosting/environment-variables" + ;; + *) + echo "https://lobehub.com/docs/self-hosting/environment-variables" + ;; + esac + ;; + tips_warning) + case $LANGUAGE in + zh_CN) + echo "警告:不要在生产环境中使用此演示应用程序!!!" + ;; + *) + echo "Warning: do not use this demo application in production!!!" + ;; + esac + ;; + esac +} + +# Function to download files +download_file() { + local file_url="$1" + local local_file="$2" + + if [ "$FORCE_DOWNLOAD" = false ] && [ -e "$local_file" ]; then + echo "$local_file" $(show_message "downloaded") + return 0 + fi + + wget -q --show-progress "$file_url" -O "$local_file" +} + +extract_file() { + local file_name=$1 + local target_dir=$2 + + if [ -e "$file_name" ]; then + tar -zxvf "$file_name" -C "$target_dir" > /dev/null 2>&1 + if [ $? -eq 0 ]; then + echo "$file_name" $(show_message "extracted_success") "$target_dir" + else + echo "$file_name" $(show_message "extracted_failed") + exit 1 + fi + else + echo "$file_name" $(show_message "file_not_exists") + exit 1 + fi +} + +# Define colors +declare -A colors +colors=( + [black]="\e[30m" + [red]="\e[31m" + [green]="\e[32m" + [yellow]="\e[33m" + [blue]="\e[34m" + [magenta]="\e[35m" + [cyan]="\e[36m" + [white]="\e[37m" + [reset]="\e[0m" +) + +print_centered() { + local text="$1" # Get input texts + local color="${2:-reset}" # Get color, default to reset + local term_width=$(tput cols) # Get terminal width + local text_length=${#text} # Get text length + local padding=$(( (term_width - text_length) / 2 )) # Get padding + # Check if the color is valid + if [[ -z "${colors[$color]}" ]]; then + echo "Invalid color specified. Available colors: ${!colors[@]}" + return 1 + fi + # Print the text with padding + printf "%*s${colors[$color]}%s${colors[reset]}\n" $padding "" "$text" +} + +# Download files asynchronously +download_file "$SOURCE_URL/${FILES[0]}" "docker-compose.yml" +download_file "$SOURCE_URL/${FILES[1]}" ".env" +download_file "$SOURCE_URL/${FILES[2]}" "init_data.json.tar.gz" +download_file "$SOURCE_URL/${FILES[3]}" "s3_data.tar.gz" + +# Extract .tar.gz file without output +extract_file "s3_data.tar.gz" "." +extract_file "init_data.json.tar.gz" "." + +# Display final message +printf "\n%s\n\n" "$(show_message "tips_run_command")" +print_centered "docker compose up -d" "green" +printf "\n%s" "$(show_message "tips_show_documentation")" +printf "%s\n" $(show_message "tips_show_documentation_url") +printf "\n\e[33m%s\e[0m\n" "$(show_message "tips_warning")" \ No newline at end of file diff --git a/docs/self-hosting/advanced/auth/next-auth/auth0.mdx b/docs/self-hosting/advanced/auth/next-auth/auth0.mdx index aafee8b06d8d..c35ef562849c 100644 --- a/docs/self-hosting/advanced/auth/next-auth/auth0.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/auth0.mdx @@ -77,12 +77,11 @@ When deploying LobeChat, you need to configure the following environment variabl | Environment Variable | Type | Description | | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | Key used to encrypt Auth.js session tokens. You can generate a key using the following command: `openssl rand -base64 32` | -| `NEXT_AUTH_SSO_PROVIDERS` | Optional | Select the single sign-on provider for LoboChat. Use `auth0` for Auth0. | -| `AUTH0_CLIENT_ID` | Required | Client ID of the Auth0 application | -| `AUTH0_CLIENT_SECRET` | Required | Client Secret of the Auth0 application | -| `AUTH0_ISSUER` | Required | Domain of the Auth0 application, `https://example.auth0.com` | -| `ACCESS_CODE` | Required | Add a password to access this service. You can set a sufficiently long random password to "disable" access code authorization. | -| `NEXTAUTH_URL` | Optional | The URL is used to specify the callback address for the execution of OAuth authentication in Auth.js. It needs to be set only when the default address is incorrect. `https://example.com/api/auth` | +| `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the single sign-on provider for LoboChat. Use `auth0` for Auth0. | +| `AUTH_AUTH0_ID` | Required | Client ID of the Auth0 application | +| `AUTH_AUTH0_SECRET` | Required | Client Secret of the Auth0 application | +| `AUTH_AUTH0_ISSUER` | Required | Domain of the Auth0 application, `https://example.auth0.com` | +| `NEXTAUTH_URL` | Required | The URL is used to specify the callback address for the execution of OAuth authentication in Auth.js. It needs to be set only when the default address is incorrect. `https://example.com/api/auth` | You can refer to the related variable details at [📘Environment Variables](/docs/self-hosting/environment-variable#auth0). diff --git a/docs/self-hosting/advanced/auth/next-auth/auth0.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/auth0.zh-CN.mdx index f9fe18d5ee36..a8305a1a179c 100644 --- a/docs/self-hosting/advanced/auth/next-auth/auth0.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/auth0.zh-CN.mdx @@ -73,11 +73,10 @@ http(s)://your-domain/api/auth/callback/auth0 | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Auth0 请填写 `auth0`。 | -| `AUTH0_CLIENT_ID` | 必选 | Auth0 应用程序的 Client ID | -| `AUTH0_CLIENT_SECRET` | 必选 | Auth0 应用程序的 Client Secret | -| `AUTH0_ISSUER` | 必选 | Auth0 应用程序的 Domain,`https://example.auth0.com` | -| `ACCESS_CODE` | 必选 | 添加访问此服务的密码,你可以设置一个足够长的随机密码以 “禁用” 访问码授权 | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | +| `AUTH_AUTH0_ID` | 必选 | Auth0 应用程序的 Client ID | +| `AUTH_AUTH0_SECRET` | 必选 | Auth0 应用程序的 Client Secret | +| `AUTH_AUTH0_ISSUER` | 必选 | Auth0 应用程序的 Domain,`https://example.auth0.com` | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variables/auth#auth-0) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/authelia.mdx b/docs/self-hosting/advanced/auth/next-auth/authelia.mdx index 84a14297895c..053806b4544e 100644 --- a/docs/self-hosting/advanced/auth/next-auth/authelia.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/authelia.mdx @@ -59,10 +59,10 @@ When deploying LobeChat, you need to configure the following environment variabl | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | The secret used to encrypt Auth.js session tokens. You can generate a secret using the following command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the SSO provider for LoboChat. Use `authentik` for Authentik. | -| `AUTHELIA_CLIENT_ID` | Required | The id just configured in Authelia, example value is lobe-chat | -| `AUTHELIA_CLIENT_SECRET` | The plaintext corresponding to the secret just configured in Authelia, example value is insecure_secret | -| `AUTHELIA_ISSUER` | Required | Your Authelia URL, for example https://sso.example.com | -| `NEXTAUTH_URL` | Optional | This URL is used to specify the callback address for Auth.js when performing OAuth verification. It only needs to be set when the default generated redirect address is incorrect. https://chat.example.com/api/auth | +| `AUTH_AUTHELIA_ID` | Required | The id just configured in Authelia, example value is lobe-chat | +| `AUTH_AUTHELIA_SECRET` | Required | The plaintext corresponding to the secret just configured in Authelia, example value is insecure_secret | +| `AUTH_AUTHELIA_ISSUER` | Required | Your Authelia URL, for example https://sso.example.com | +| `NEXTAUTH_URL` | Required | This URL is used to specify the callback address for Auth.js when performing OAuth verification. It only needs to be set when the default generated redirect address is incorrect. https://chat.example.com/api/auth | Go to [📘 Environment Variables](/docs/self-hosting/environment-variable#Authelia) for details about the variables. diff --git a/docs/self-hosting/advanced/auth/next-auth/authelia.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/authelia.zh-CN.mdx index 92100d905cae..dc66c9ffa16d 100644 --- a/docs/self-hosting/advanced/auth/next-auth/authelia.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/authelia.zh-CN.mdx @@ -57,10 +57,10 @@ identity_providers: | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Authelia 请填写 `authelia`。 | -| `AUTHELIA_CLIENT_ID` | 必选 | 刚刚在 Authelia 配置的 `id`,示例值是 `lobe-chat` | -| `AUTHELIA_CLIENT_SECRET` | 必选 | 刚刚在 Authelia 配置的 `secret` 对应的明文,示例值是 `insecure_secret` | -| `AUTHELIA_ISSUER` | 必选 |您的 Authelia 的网址,例如 `https://sso.example.com` | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://chat.example.com/api/auth` | +| `AUTH_AUTHELIA_ID` | 必选 | 刚刚在 Authelia 配置的 `id`,示例值是 `lobe-chat` | +| `AUTH_AUTHELIA_SECRET` | 必选 | 刚刚在 Authelia 配置的 `secret` 对应的明文,示例值是 `insecure_secret` | +| `AUTH_AUTHELIA_ISSUER` | 必选 |您的 Authelia 的网址,例如 `https://sso.example.com` | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://chat.example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variable#Authelia) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/authentik.mdx b/docs/self-hosting/advanced/auth/next-auth/authentik.mdx index 21ea9803e8c2..fdd6dde35008 100644 --- a/docs/self-hosting/advanced/auth/next-auth/authentik.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/authentik.mdx @@ -56,10 +56,10 @@ When deploying LobeChat, you need to configure the following environment variabl | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | The secret used to encrypt Auth.js session tokens. You can generate a secret using the following command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the SSO provider for LoboChat. Use `authentik` for Authentik. | -| `AUTHENTIK_CLIENT_ID` | Required | The Client ID from the Authentik application provider details page | -| `AUTHENTIK_CLIENT_SECRET` | Required | The Client Secret from the Authentik application provider details page | -| `AUTHENTIK_ISSUER` | Required | The OpenID Configuration Issuer from the Authentik application provider details page | -| `NEXTAUTH_URL` | Optional | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It only needs to be set when the default generated redirect address is incorrect. `https://example.com/api/auth` | +| `AUTH_AUTHENTIK_ID` | Required | The Client ID from the Authentik application provider details page | +| `AUTH_AUTHENTIK_SECRET` | Required | The Client Secret from the Authentik application provider details page | +| `AUTH_AUTHENTIK_ISSUER` | Required | The OpenID Configuration Issuer from the Authentik application provider details page | +| `NEXTAUTH_URL` | Required | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It only needs to be set when the default generated redirect address is incorrect. `https://example.com/api/auth` | Go to [📘 Environment Variables](/docs/self-hosting/environment-variable#Authentik) for details about the variables. diff --git a/docs/self-hosting/advanced/auth/next-auth/authentik.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/authentik.zh-CN.mdx index c806e43b8174..ca5f708dc9d7 100644 --- a/docs/self-hosting/advanced/auth/next-auth/authentik.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/authentik.zh-CN.mdx @@ -51,10 +51,10 @@ https://your-domain/api/auth/callback/authentik | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Authentik 请填写 `authentik`。 | -| `AUTHENTIK_CLIENT_ID` | 必选 | Authentik 提供程序详情页的 客户端 ID | -| `AUTHENTIK_CLIENT_SECRET` | 必选 | Authentik 提供程序详情页的 客户端 Secret | -| `AUTHENTIK_ISSUER` | 必选 | Authentik 提供程序详情页的 OpenID 配置颁发者 | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | +| `AUTH_AUTHENTIK_ID` | 必选 | Authentik 提供程序详情页的 客户端 ID | +| `AUTH_AUTHENTIK_SECRET` | 必选 | Authentik 提供程序详情页的 客户端 Secret | +| `AUTH_AUTHENTIK_ISSUER` | 必选 | Authentik 提供程序详情页的 OpenID 配置颁发者 | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variable#Authentik) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.mdx b/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.mdx index c13b813525a8..46cb1af6109d 100644 --- a/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.mdx @@ -51,10 +51,10 @@ When deploying LobeChat, you need to configure the following environment variabl | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | The secret used to encrypt Auth.js session tokens. You can generate a secret using the following command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the SSO provider for LoboChat. Use `cloudflare-zero-trust` for Cloudflare Zero Trust. | -| `CLOUDFLARE_ZERO_TRUST_CLIENT_ID` | Required | The Client ID from the Cloudflare Zero Trust application provider details page | -| `CLOUDFLARE_ZERO_TRUST_CLIENT_SECRET` | Required | The Client Secret from the Cloudflare Zero Trust application provider details page | -| `CLOUDFLARE_ZERO_TRUST_ISSUER` | Required | The OpenID Configuration Issuer from the Cloudflare Zero Trust application provider details page | -| `NEXTAUTH_URL` | Optional | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It only needs to be set when the default generated redirect address is incorrect. `https://example.com/api/auth` | +| `AUTH_CLOUDFLARE_ZERO_TRUST_ID` | Required | The Client ID from the Cloudflare Zero Trust application provider details page | +| `AUTH_CLOUDFLARE_ZERO_TRUST_SECRET` | Required | The Client Secret from the Cloudflare Zero Trust application provider details page | +| `AUTH_CLOUDFLARE_ZERO_TRUST_ISSUER` | Required | The OpenID Configuration Issuer from the Cloudflare Zero Trust application provider details page | +| `NEXTAUTH_URL` | Required | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It only needs to be set when the default generated redirect address is incorrect. `https://example.com/api/auth` | Go to [📘 Environment Variables](/docs/self-hosting/environment-variable#Cloudflare%20Zero%20Trust) for details about the variables. diff --git a/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.zh-CN.mdx index b75bc404c54d..55bdf5a61269 100644 --- a/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/cloudflare-zero-trust.zh-CN.mdx @@ -52,7 +52,7 @@ tags: | `CLOUDFLARE_ZERO_TRUST_CLIENT_ID` | 必选 | 在 Cloudflare Zero Trust 生成的 `Client ID`,示例值是 `lobe-chat` | | `CLOUDFLARE_ZERO_TRUST_CLIENT_SECRET` | 必选 | 在 Cloudflare Zero Trust 生成的 `Client secret`,示例值是 `insecure_secret` | | `CLOUDFLARE_ZERO_TRUST_ISSUER` | 必选 | 在 Cloudflare Zero Trust 生成的 `Issuer`,例如 `https://example.cloudflareaccess.com/cdn-cgi/access/sso/oidc/7db0f` | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://chat.example.com/api/auth` | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://chat.example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variable#Cloudflare%20Zero%20Trust) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/github.mdx b/docs/self-hosting/advanced/auth/next-auth/github.mdx index 8f4b38408e32..4fac6487d2cc 100644 --- a/docs/self-hosting/advanced/auth/next-auth/github.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/github.mdx @@ -84,9 +84,9 @@ When deploying LobeChat, you need to configure the following environment variabl | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | Key used to encrypt Auth.js session tokens. You can generate the key using the command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the Single Sign-On provider for LobeChat. Use `github` for Github. | -| `GITHUB_CLIENT_ID` | Required | Client ID in the Github App details page. | -| `GITHUB_CLIENT_SECRET` | Required | Client Secret in the Github App details page. | -| `NEXTAUTH_URL` | Optional | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. Only set it if the default generated redirect address is incorrect. `https://example.com/api/auth` | +| `AUTH_GITHUB_ID` | Required | Client ID in the Github App details page. | +| `AUTH_GITHUB_SECRET` | Required | Client Secret in the Github App details page. | +| `NEXTAUTH_URL` | Required | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. Only set it if the default generated redirect address is incorrect. `https://example.com/api/auth` | Go to [📘 Environment Variables](/docs/self-hosting/environment-variables/auth#github) for detailed diff --git a/docs/self-hosting/advanced/auth/next-auth/github.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/github.zh-CN.mdx index ada5afc74c72..96996d7c81ab 100644 --- a/docs/self-hosting/advanced/auth/next-auth/github.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/github.zh-CN.mdx @@ -80,9 +80,9 @@ tags: | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Github 请填写 `github`。 | -| `GITHUB_CLIENT_ID` | 必选 | Github App详情页的 客户端 ID | -| `GITHUB_CLIENT_SECRET` | 必选 | Github App详情页的 客户端 Secret | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | +| `AUTH_GITHUB_ID` | 必选 | Github App详情页的 客户端 ID | +| `AUTH_GITHUB_SECRET` | 必选 | Github App详情页的 客户端 Secret | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variables/auth#github) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/logto.mdx b/docs/self-hosting/advanced/auth/next-auth/logto.mdx index 276a5af0b15d..7c63e434eb76 100644 --- a/docs/self-hosting/advanced/auth/next-auth/logto.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/logto.mdx @@ -60,10 +60,10 @@ If you are using Logto Cloud, assume its endpoint domain is `https://example.log | ------------------------- | -------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | `NEXT_AUTH_SECRET` | Required | The key used to encrypt Auth.js session tokens. You can generate a key using the command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the single sign-on provider for LobeChat. For Logto, enter `logto`. | - | `LOGTO_CLIENT_ID` | Required | The Client ID from the Logto App details page | - | `LOGTO_CLIENT_SECRET` | Required | The Client Secret from the Logto App details page | - | `LOGTO_ISSUER` | Required | OpenID Connect issuer of the Logto provider | - | `NEXTAUTH_URL` | Optional | This URL specifies the callback address for Auth.js during OAuth verification, needed only if the default generated redirect address is incorrect. `https://lobe.example.com/api/auth` | + | `AUTH_LOGTO_ID` | Required | The Client ID from the Logto App details page | + | `AUTH_LOGTO_SECRET` | Required | The Client Secret from the Logto App details page | + | `AUTH_LOGTO_ISSUER` | Required | OpenID Connect issuer of the Logto provider | + | `NEXTAUTH_URL` | Required | This URL specifies the callback address for Auth.js during OAuth verification, needed only if the default generated redirect address is incorrect. `https://lobe.example.com/api/auth` | Visit [📘 Environment Variables](/docs/self-hosting/environment-variables/auth#logto) for details on related variables. diff --git a/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx index 632e2bc9a70b..7aee10581893 100644 --- a/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/logto.zh-CN.mdx @@ -64,10 +64,10 @@ tags: | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Logto 请填写 `logto`。 | -| `LOGTO_CLIENT_ID` | 必选 | Logto App 详情页的 Client ID | -| `LOGTO_CLIENT_SECRET` | 必选 | Logto App 详情页的 Client Secret | -| `LOGTO_ISSUER` | 必选 | Logto 提供程序的 OpenID Connect 颁发者 | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://lobe.example.com/api/auth` | +| `AUTH_LOGTO_ID` | 必选 | Logto App 详情页的 Client ID | +| `AUTH_LOGTO_SECRET` | 必选 | Logto App 详情页的 Client Secret | +| `AUTH_LOGTO_ISSUER` | 必选 | Logto 提供程序的 OpenID Connect 颁发者 | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://lobe.example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variables/auth#logto) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.mdx b/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.mdx index 52c451c111c2..70cf0b1f1552 100644 --- a/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.mdx @@ -73,10 +73,10 @@ When deploying LobeChat, you need to configure the following environment variabl | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | Key used to encrypt Auth.js session tokens. You can generate the key using the following command: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the single sign-on provider for LoboChat. Use `azure-ad` for Microsoft Entra ID. | -| `AZURE_AD_CLIENT_ID` | Required | Client ID of the Microsoft Entra ID application. | -| `AZURE_AD_CLIENT_SECRET` | Required | Client Secret of the Microsoft Entra ID application. | -| `AZURE_AD_TENANT_ID` | Required | Tenant ID of the Microsoft Entra ID application. | -| `NEXTAUTH_URL` | Optional | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It is only necessary to set it when the default generated redirect address is incorrect. `https://example.com/api/auth` | +| `AUTH_AZURE_AD_ID` | Required | Client ID of the Microsoft Entra ID application. | +| `AUTH_AZURE_AD_SECRET` | Required | Client Secret of the Microsoft Entra ID application. | +| `AUTH_AZURE_AD_TENANT_ID` | Required | Tenant ID of the Microsoft Entra ID application. | +| `NEXTAUTH_URL` | Required | This URL is used to specify the callback address for Auth.js when performing OAuth authentication. It is only necessary to set it when the default generated redirect address is incorrect. `https://example.com/api/auth` | You can refer to [📘 environment diff --git a/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.zh-CN.mdx index aed710194cb4..e1fcc48427c9 100644 --- a/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/microsoft-entra-id.zh-CN.mdx @@ -70,10 +70,10 @@ https://your-domain/api/auth/callback/azure-ad | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成秘钥: `openssl rand -base64 32` | | `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 选择 LoboChat 的单点登录提供商。使用 Microsoft Entra ID 请填写 `azure-ad`。 | -| `AZURE_AD_CLIENT_ID` | 必选 | Microsoft Entra ID 应用程序的 Client ID | -| `AZURE_AD_CLIENT_SECRET` | 必选 | Microsoft Entra ID 应用程序的 Client Secret | -| `AZURE_AD_TENANT_ID` | 必选 | Microsoft Entra ID 应用程序的 Tenant ID | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | +| `AUTH_AZURE_AD_ID` | 必选 | Microsoft Entra ID 应用程序的 Client ID | +| `AUTH_AZURE_AD_SECRET` | 必选 | Microsoft Entra ID 应用程序的 Client Secret | +| `AUTH_AZURE_AD_TENANT_ID` | 必选 | Microsoft Entra ID 应用程序的 Tenant ID | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 在执行 OAuth 验证时的回调地址,当默认生成的重定向地址发生不正确时才需要设置。`https://example.com/api/auth` | 前往 [📘 环境变量](/zh/docs/self-hosting/environment-variable#microsoft-entra-id) 可查阅相关变量详情。 diff --git a/docs/self-hosting/advanced/auth/next-auth/zitadel.mdx b/docs/self-hosting/advanced/auth/next-auth/zitadel.mdx index 780d95b65ebb..cb2395680e65 100644 --- a/docs/self-hosting/advanced/auth/next-auth/zitadel.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/zitadel.mdx @@ -100,11 +100,11 @@ When deploying LobeChat, you need to configure the following environment variabl | Environment Variable | Type | Description | | --- | --- | --- | | `NEXT_AUTH_SECRET` | Required | Key used to encrypt Auth.js session tokens. You can generate a key using the following command: `openssl rand -base64 32` | -| `NEXT_AUTH_SSO_PROVIDERS` | Optional | Select the single sign-on provider for LoboChat. Use `zitadel` for ZITADEL. | -| `ZITADEL_CLIENT_ID` | Required | Client ID (`ClientId` as shown in ZITADEL) of the ZITADEL application | -| `ZITADEL_CLIENT_SECRET` | Required | Client Secret (`ClientSecret` as shown in ZITADEL) of the ZITADEL application | -| `ZITADEL_ISSUER` | Required | Issuer URL of the ZITADEL application | -| `NEXTAUTH_URL` | Optional | The URL is used to specify the callback address for the execution of OAuth authentication in Auth.js. It needs to be set only when the default address is incorrect. `https://example.com/api/auth` | +| `NEXT_AUTH_SSO_PROVIDERS` | Required | Select the single sign-on provider for LoboChat. Use `zitadel` for ZITADEL. | +| `AUTH_ZITADEL_ID` | Required | Client ID (`ClientId` as shown in ZITADEL) of the ZITADEL application | +| `AUTH_ZITADEL_SECRET` | Required | Client Secret (`ClientSecret` as shown in ZITADEL) of the ZITADEL application | +| `AUTH_ZITADEL_ISSUER` | Required | Issuer URL of the ZITADEL application | +| `NEXTAUTH_URL` | Required | The URL is used to specify the callback address for the execution of OAuth authentication in Auth.js. It needs to be set only when the default address is incorrect. `https://example.com/api/auth` | You can refer to the related variable details at [📘Environment Variables](/docs/self-hosting/environment-variables/auth#zitadel). diff --git a/docs/self-hosting/advanced/auth/next-auth/zitadel.zh-CN.mdx b/docs/self-hosting/advanced/auth/next-auth/zitadel.zh-CN.mdx index 0224abd10601..5599247c4c02 100644 --- a/docs/self-hosting/advanced/auth/next-auth/zitadel.zh-CN.mdx +++ b/docs/self-hosting/advanced/auth/next-auth/zitadel.zh-CN.mdx @@ -96,11 +96,11 @@ http(s)://your-domain/api/auth/callback/zitadel | 环境变量 | 类型 | 描述 | | --- | --- | --- | | `NEXT_AUTH_SECRET` | 必选 | 用于加密 Auth.js 会话令牌的密钥。您可以使用以下命令生成密钥:`openssl rand -base64 32` | -| `NEXT_AUTH_SSO_PROVIDERS` | 可选 | 为 LobeChat 选择单点登录提供程序。对于 ZITADEL,请填写 `zitadel`。 | -| `ZITADEL_CLIENT_ID` | 必选 | ZITADEL 应用的 Client ID(`ClientId`)。 | -| `ZITADEL_CLIENT_SECRET` | 必选 | ZITADEL 应用的 Client Secret(`ClientSecret`)。 | -| `ZITADEL_ISSUER` | 必选 | ZITADEL 应用的 OpenID Connect 颁发者(issuer)URL。 | -| `NEXTAUTH_URL` | 可选 | 该 URL 用于指定 Auth.js 中执行 OAuth 认证的回调地址。仅当默认地址不正确时才需要设置。`https://example.com/api/auth` | +| `NEXT_AUTH_SSO_PROVIDERS` | 必选 | 为 LobeChat 选择单点登录提供程序。对于 ZITADEL,请填写 `zitadel`。 | +| `AUTH_ZITADEL_ID` | 必选 | ZITADEL 应用的 Client ID(`ClientId`)。 | +| `AUTH_ZITADEL_SECRET` | 必选 | ZITADEL 应用的 Client Secret(`ClientSecret`)。 | +| `AUTH_ZITADEL_ISSUER` | 必选 | ZITADEL 应用的 OpenID Connect 颁发者(issuer)URL。 | +| `NEXTAUTH_URL` | 必选 | 该 URL 用于指定 Auth.js 中执行 OAuth 认证的回调地址。仅当默认地址不正确时才需要设置。`https://example.com/api/auth` | 您可以在 [📘 环境变量](/zh/docs/self-hosting/environment-variables/auth#zitadel) 中查看相关变量的详细信息。 diff --git a/docs/self-hosting/environment-variables/model-provider.mdx b/docs/self-hosting/environment-variables/model-provider.mdx index 515b6d6b7263..e6377bff4e75 100644 --- a/docs/self-hosting/environment-variables/model-provider.mdx +++ b/docs/self-hosting/environment-variables/model-provider.mdx @@ -114,6 +114,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: `https://generativelanguage.googleapis.com` - Example: `https://api.genai.gd.edu.kg/google` +### `GOOGLE_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+gemini-1.5-flash-latest,+gemini-1.5-pro-latest` + ## Anthropic AI ### `ANTHROPIC_API_KEY` @@ -185,6 +192,22 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: `-` - Example: `-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta` +## Github + +### `GITHUB_TOKEN` + +- Type: Required +- Description: This is your Personal access tokens you got for in the Github +- Default: `-` +- Example:`ghp_xxxxxx...xxxxxx=` + +### `GITHUB_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+gpt-4o,+gpt-4o-mini` + ## TogetherAI ### `TOGETHERAI_API_KEY` @@ -199,7 +222,23 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Type: Optional - Description: Used to specify a custom TogetherAI model list. Model definition syntax rules see [model-list][model-list] - Default: `-` -- Example: `01-ai/yi-34b-chat` +- Example: `-all,+meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo,+meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo` + +## Fireworks AI + +### `FIREWORKSAI_API_KEY` + +- Type: Required +- Description: This is the API key you applied for in the Fireworks AI service +- Default: `-` +- Example:`xxxxxx...xxxxxx` + +### `FIREWORKSAI_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+accounts/fireworks/models/firefunction-v2,+accounts/fireworks/models/firefunction-v1` ## Ollama @@ -242,6 +281,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: - - Example: `pplx-xxxxxx...xxxxxx` +### `PERPLEXITY_PROXY_URL` + +- Type: Optional +- Description: If you manually configure the Perplexity API proxy, you can use this configuration item to override the default Perplexity API request base URL +- Default: `https://api.Perplexity.ai` +- Example: `https://my-Perplexity-proxy.com` + ## Minimax AI ### `MINIMAX_API_KEY` @@ -269,6 +315,20 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: - - Example: `gsk_xxxxxx...xxxxxx` +### `GROQ_PROXY_URL` + +- Type: Optional +- Description: If you manually configure the Groq API proxy, you can use this configuration item to override the default Groq API request base URL +- Default: `https://api.groq.com/openai/v1` +- Example: `https://my-groq-proxy.com/v1` + +### `GROQ_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+gemma2-9b-it,+llama-3.1-8b-instant` + ## ZHIPU AI ### `ZHIPU_API_KEY` @@ -278,6 +338,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: - - Example: `4582d332441a313f5c2ed9824d1798ca.rC8EcTAhgbOuAuVT` +### `ZHIPU_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+glm-4-alltools,+glm-4-plus` + ## 01.AI ### `ZEROONE_API_KEY` @@ -287,6 +354,13 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: - - Example:`xxxxxx...xxxxxx` +### `ZEROONE_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+yi-large,+yi-large-rag` + ## Qwen ### `QWEN_API_KEY` @@ -296,4 +370,113 @@ If you need to use Azure OpenAI to provide model services, you can refer to the - Default: - - Example:`sk-xxxxx...xxxxx` +### `QWEN_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+qwen-turbo-latest,+qwen-plus-latest` + +## Stepfun AI + +### `STEPFUN_API_KEY` + +- Type: Required +- Description: This is the DashScope API key you can obtain from Stepfun AI service +- Default: - +- Example:`sk-xxxxx...xxxxx` + +## Novita AI + +### `NOVITA_API_KEY` + +- Type: Required +- Description: This is the API key you applied for in the Novita AI service +- Default: - +- Example:`xxxxxx...xxxxxx` + +### `NOVITA_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+meta-llama/llama-3.1-8b-instruct,+meta-llama/llama-3.1-70b-instruct` + +## BAICHUAN + +### `BAICHUAN_API_KEY` + +- Type: Required +- Description: This is the API key you applied for in the BAICHUAN service +- Default: - +- Example:`xxxxxx...xxxxxx` + +## TAICHU + +### `TAICHU_API_KEY` + +- Type: Required +- Description: This is the API key you applied for in the TAICHU service +- Default: - +- Example:`xxxxxx...xxxxxx` + +## 360 AI + +### `AI360_API_KEY` + +- Type: Required +- Description: This is the API key you applied for in the 360 AI service +- Default: - +- Example:`xxxxxx...xxxxxx` + +## Siliconflow + +### `SILICONCLOUD_API_KEY` + +- Type: Required +- Description: This is the API key you applied from Siliconflow service +- Default: - +- Example: `xxxxxx...xxxxxx` + +### `SILICONCLOUD_PROXY_URL` + +- Type: Optional +- Description: If you manually configure the Siliconflow API proxy, you can use this configuration item to override the default Siliconflow API request base URL +- Default: `https://api.siliconflow.cn/v1` +- Example: `https://my-siliconflow-proxy.com/v1` + +### `SILICONCLOUD_MODEL_LIST` + +- Type: Optional +- Description: Used to control the model list, use `+` to add a model, use `-` to hide a model, use `model_name=display_name` to customize the display name of a model, separated by commas. Definition syntax rules see [model-list][model-list] +- Default: `-` +- Example: `-all,+deepseek-ai/DeepSeek-V2.5,+Qwen/Qwen2.5-7B-Instruct` + +## Upstage AI + +### `UPSTAGE_API_KEY` + +- Type: Required +- Description: This is the API key you applied from Upstage AI service +- Default: - +- Example: `xxxxxx...xxxxxx` + +## Spark AI + +### `SPARK_API_KEY` + +- Type: Required +- Description: This is the API key you applied from Spark AI service +- Default: - +- Example: `xxxxxx...xxxxxx` + +## A21 AI + +### `AI21_API_KEY` + +- Type: Required +- Description: This is the API key you applied from AI21_API_KEY service +- Default: - +- Example: `xxxxxx...xxxxxx` + [model-list]: /docs/self-hosting/advanced/model-list diff --git a/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx b/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx index ff49e57f6728..e56b07633ea4 100644 --- a/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx +++ b/docs/self-hosting/environment-variables/model-provider.zh-CN.mdx @@ -112,6 +112,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 默认值:`https://generativelanguage.googleapis.com` - 示例:`https://api.genai.gd.edu.kg/google` +### `GOOGLE_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+gemini-1.5-flash-latest,+gemini-1.5-pro-latest` + ## Anthropic AI ### `ANTHROPIC_API_KEY` @@ -179,10 +186,26 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, ### `OPENROUTER_MODEL_LIST` - 类型:可选 -- 描述:用于指定自定义 OpenRouter 模型列表。模型定义语法规则见 [模型列表][model-list] +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] - 默认值:`-` - 示例:`-all,+01-ai/yi-34b-chat,+huggingfaceh4/zephyr-7b-beta` +## Github + +### `GITHUB_TOKEN` + +- 类型:必选 +- 描述:这是你在 Github 申请的 Personal access tokens +- 默认值:- +- 示例:`ghp_xxxxxx...xxxxxx=` + +### `GITHUB_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+gpt-4o,+gpt-4o-mini` + ## TogetherAI ### `TOGETHERAI_API_KEY` @@ -195,9 +218,25 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, ### `TOGETHERAI_MODEL_LIST` - 类型:可选 -- 描述:用于指定自定义 Together AI 的模型列表。模型定义语法规则见 [模型列表][model-list] +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] - 默认值:`-` -- 示例:`01-ai/yi-34b-chat` +- 示例:`-all,+meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo,+meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo` + +## Fireworks AI + +### `FIREWORKSAI_API_KEY` + +- 类型:必选 +- 描述:这是你在 Fireworks AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +### `FIREWORKSAI_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+accounts/fireworks/models/firefunction-v2,+accounts/fireworks/models/firefunction-v1` ## Ollama @@ -240,6 +279,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 默认值:- - 示例:`pplx-xxxxxx...xxxxxx` +### `PERPLEXITY_PROXY_URL` + +- 类型:可选 +- 描述:如果你手动配置了 Perplexity 接口代理,可以使用此配置项来覆盖默认的 Perplexity API 请求基础 URL +- 默认值:`https://api.Perplexity.ai` +- 示例:`https://my-Perplexity-proxy.com` + ## Minimax AI ### `MINIMAX_API_KEY` @@ -267,6 +313,20 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 默认值:- - 示例:`gsk_xxxxxx...xxxxxx` +### `GROQ_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+gemma2-9b-it,+llama-3.1-8b-instant` + +### `GROQ_PROXY_URL` + +- 类型:可选 +- 描述:如果你手动配置了 Groq 接口代理,可以使用此配置项来覆盖默认的 Groq API 请求基础 URL +- 默认值:`https://api.groq.com/openai/v1` +- 示例:`https://my-groq-proxy.com/v1` + ## 智谱 AI ### `ZHIPU_API_KEY` @@ -276,6 +336,13 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 默认值:- - 示例:`4582d332441a313f5c2ed9824d1798ca.rC8EcTAhgbOuAuVT` +### `ZHIPU_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+glm-4-alltools,+glm-4-plus` + ## 01 AI ### `ZEROONE_API_KEY` @@ -285,7 +352,12 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 默认值:- - 示例:`xxxxxx...xxxxxx` -[model-list]: /zh/docs/self-hosting/advanced/model-list +### `ZEROONE_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+yi-large,+yi-large-rag` ## 通义千问 @@ -295,3 +367,114 @@ LobeChat 在部署时提供了丰富的模型服务商相关的环境变量, - 描述:这是你在阿里云百炼平台上获取的 DashScope API 密钥 - 默认值:- - 示例:`sk-xxxxx...xxxxx` + +### `QWEN_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+qwen-turbo-latest,+qwen-plus-latest` + +## Stepfun AI + +### `STEPFUN_API_KEY` + +- 类型:必选 +- 描述:这是你在 Stepfun AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## Novita AI + +### `NOVITA_API_KEY` + +- 类型:必选 +- 描述:这是你在 Novita AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +### `NOVITA_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+meta-llama/llama-3.1-8b-instruct,+meta-llama/llama-3.1-70b-instruct` + +## 百川 + +### `BAICHUAN_API_KEY` + +- 类型:必选 +- 描述:这是你在 百川智能 服务平台申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## 紫东太初 + +### `TAICHU_API_KEY` + +- 类型:必选 +- 描述:这是你在 紫东太初 服务平台申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## 360 AI + +### `AI360_API_KEY` + +- 类型:必选 +- 描述:这是你在 360智脑 服务平台申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## Siliconflow + +### `SILICONCLOUD_API_KEY` + +- 类型:必选 +- 描述:这是你在 Siliconflow 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +### `SILICONCLOUD_MODEL_LIST` + +- 类型:可选 +- 描述:用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名<扩展配置>` 来自定义模型的展示名,用英文逗号隔开。模型定义语法规则见 [模型列表][model-list] +- 默认值:`-` +- 示例:`-all,+deepseek-ai/DeepSeek-V2.5,+Qwen/Qwen2.5-7B-Instruct` + +### `SILICONCLOUD_PROXY_URL` + +- 类型:可选 +- 描述:如果你手动配置了 Siliconflow 接口代理,可以使用此配置项来覆盖默认的 Siliconflow API 请求基础 URL +- 默认值:`https://api.siliconflow.cn/v1` +- 示例:`https://my-siliconflow-proxy.com/v1` + +## Upstage AI + +### `UPSTAGE_API_KEY` + +- 类型:必选 +- 描述:这是你在 Upstage AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## Spark AI + +### `SPARK_API_KEY` + +- 类型:必选 +- 描述:这是你在 Spark AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +## A21 AI + +### `AI21_API_KEY` + +- 类型:必选 +- 描述:这是你在 A21 AI 服务中申请的 API 密钥 +- 默认值:- +- 示例:`xxxxxx...xxxxxx` + +[model-list]: /zh/docs/self-hosting/advanced/model-list diff --git a/docs/self-hosting/server-database/docker-compose.mdx b/docs/self-hosting/server-database/docker-compose.mdx index 55eaace8e94c..edbd63435b0c 100644 --- a/docs/self-hosting/server-database/docker-compose.mdx +++ b/docs/self-hosting/server-database/docker-compose.mdx @@ -69,8 +69,8 @@ mkdir lobe-chat-db Pull the configuration files into your directory: ```sh -curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml > docker-compose.yml -curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/.env.example > .env +curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local-logto/docker-compose.yml > docker-compose.yml +curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local-logto/.env.example > .env ``` ### Start Services diff --git a/docs/self-hosting/server-database/docker-compose.zh-CN.mdx b/docs/self-hosting/server-database/docker-compose.zh-CN.mdx index 8952ed47686a..a518144a940f 100644 --- a/docs/self-hosting/server-database/docker-compose.zh-CN.mdx +++ b/docs/self-hosting/server-database/docker-compose.zh-CN.mdx @@ -67,8 +67,8 @@ mkdir lobe-chat-db 拉取配置文件到你的目录中: ```sh -curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/docker-compose.yml > docker-compose.yml -curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local/.env.zh-CN.example > .env +curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local-logto/docker-compose.yml > docker-compose.yml +curl -fsSL https://raw.githubusercontent.com/lobehub/lobe-chat/HEAD/docker-compose/local-logto/.env.zh-CN.example > .env ``` ### 启动服务 diff --git a/locales/ar/chat.json b/locales/ar/chat.json index dbce8c9b0905..51659cea863a 100644 --- a/locales/ar/chat.json +++ b/locales/ar/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "مرحبًا، أنا **{{name}}**، يمكنك بدء المحادثة معي على الفور، أو يمكنك الذهاب إلى [إعدادات المساعد]({{url}}) لإكمال معلوماتي.", "agentDefaultMessageWithSystemRole": "مرحبًا، أنا **{{name}}**، {{systemRole}}، دعنا نبدأ الدردشة!", "agentDefaultMessageWithoutEdit": "مرحبًا، أنا **{{name}}**، دعنا نبدأ المحادثة!", - "agentsAndConversations": "الوكلاء والمحادثات", + "agents": "مساعد", "artifact": { "generating": "جاري الإنشاء", "thinking": "جاري التفكير", @@ -81,7 +81,7 @@ }, "regenerate": "إعادة الإنشاء", "roleAndArchive": "الدور والأرشيف", - "searchAgentPlaceholder": "البحث عن مساعد ومحادثة...", + "searchAgentPlaceholder": "مساعد البحث...", "sendPlaceholder": "أدخل محتوى الدردشة...", "sessionGroup": { "config": "إدارة المجموعات", diff --git a/locales/ar/models.json b/locales/ar/models.json index 4daff54a3189..2173242ffb95 100644 --- a/locales/ar/models.json +++ b/locales/ar/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) هو نموذج مبتكر، مناسب لتطبيقات متعددة المجالات والمهام المعقدة." }, + "Max-32k": { + "description": "Spark Max 32K مزود بقدرة معالجة سياقية كبيرة، وفهم أقوى للسياق وقدرة على الاستدلال المنطقي، يدعم إدخال نصوص تصل إلى 32K توكن، مناسب لقراءة الوثائق الطويلة، وأسئلة وأجوبة المعرفة الخاصة، وغيرها من السيناريوهات." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO هو دمج متعدد النماذج مرن للغاية، يهدف إلى تقديم تجربة إبداعية ممتازة." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 هو نموذج متعدد الوسائط فعال، يدعم التوسع في التطبيقات الواسعة." }, + "gemini-1.5-flash-002": { + "description": "جمني 1.5 فلاش 002 هو نموذج متعدد الوسائط فعال، يدعم توسيع التطبيقات على نطاق واسع." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 مصمم لمعالجة سيناريوهات المهام الكبيرة، ويوفر سرعة معالجة لا مثيل لها." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "جمني 1.5 فلاش 8B 0924 هو النموذج التجريبي الأحدث، حيث حقق تحسينات ملحوظة في الأداء في حالات الاستخدام النصية ومتعددة الوسائط." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 يوفر قدرات معالجة متعددة الوسائط محسّنة، مناسبة لمجموعة متنوعة من سيناريوهات المهام المعقدة." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 هو حل ذكاء اصطناعي متعدد الوسائط قابل للتوسع، يدعم مجموعة واسعة من المهام المعقدة." }, + "gemini-1.5-pro-002": { + "description": "جمني 1.5 برو 002 هو النموذج الأحدث الجاهز للإنتاج، حيث يقدم مخرجات ذات جودة أعلى، مع تحسينات ملحوظة خاصة في الرياضيات والسياقات الطويلة والمهام البصرية." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 يوفر قدرات معالجة متعددة الوسائط ممتازة، مما يوفر مرونة أكبر لتطوير التطبيقات." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "نموذج اللغة الكبير TaiChu يتمتع بقدرات قوية في فهم اللغة، بالإضافة إلى إنشاء النصوص، والإجابة على الأسئلة، وبرمجة الأكواد، والحسابات الرياضية، والاستدلال المنطقي، وتحليل المشاعر، وتلخيص النصوص. يجمع بشكل مبتكر بين التدريب المسبق على البيانات الضخمة والمعرفة الغنية من مصادر متعددة، من خلال تحسين تقنيات الخوارزميات باستمرار واستيعاب المعرفة الجديدة من البيانات النصية الضخمة، مما يحقق تطورًا مستمرًا في أداء النموذج. يوفر للمستخدمين معلومات وخدمات أكثر سهولة وتجربة أكثر ذكاءً." }, + "taichu_vqa": { + "description": "تايتشو 2.0V يجمع بين فهم الصور، ونقل المعرفة، والاستدلال المنطقي، ويظهر أداءً بارزًا في مجال الأسئلة والأجوبة النصية والصورية." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) يوفر قدرة حسابية معززة من خلال استراتيجيات فعالة وهندسة نموذجية." }, diff --git a/locales/bg-BG/chat.json b/locales/bg-BG/chat.json index 6168aacdfe31..e07eb8262ae6 100644 --- a/locales/bg-BG/chat.json +++ b/locales/bg-BG/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Здравейте, аз съм **{{name}}**, можете да започнете разговор с мен веднага или да отидете на [Настройки на асистента]({{url}}), за да попълните информацията ми.", "agentDefaultMessageWithSystemRole": "Здравей, аз съм **{{name}}**, {{systemRole}}. Нека започнем да чатим!", "agentDefaultMessageWithoutEdit": "Здравей, аз съм **{{name}}** и нека започнем разговора!", - "agentsAndConversations": "агенти и разговори", + "agents": "Асистент", "artifact": { "generating": "Генериране", "thinking": "В процес на мислене", @@ -81,7 +81,7 @@ }, "regenerate": "Прегенерирай", "roleAndArchive": "Роля и архив", - "searchAgentPlaceholder": "Търсене на агенти и разговори...", + "searchAgentPlaceholder": "Търсач на помощ...", "sendPlaceholder": "Напиши съобщението си тук...", "sessionGroup": { "config": "Управление на групи", diff --git a/locales/bg-BG/models.json b/locales/bg-BG/models.json index 171c21806eb4..4f40d01f6a79 100644 --- a/locales/bg-BG/models.json +++ b/locales/bg-BG/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) е иновативен модел, подходящ за приложения в множество области и сложни задачи." }, + "Max-32k": { + "description": "Spark Max 32K е конфигуриран с голяма способност за обработка на контекст, по-силно разбиране на контекста и логическо разсъждение, поддържа текстов вход от 32K токена, подходящ за четене на дълги документи, частни въпроси и отговори и други сценарии." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO е високо гъвкава многомоделна комбинация, предназначена да предостави изключителен креативен опит." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 е ефективен многомодален модел, който поддържа разширяване на широк спектър от приложения." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 е ефективен мултимодален модел, който поддържа разширения за широко приложение." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 е проектиран за обработка на мащабни задачи, предлагащ ненадмината скорост на обработка." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 е най-новият експериментален модел, който показва значителни подобрения в производителността както в текстови, така и в мултимодални приложения." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 предлага оптимизирани многомодални обработващи способности, подходящи за множество сложни задачи." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 е разширяемо многомодално AI решение, което поддържа широк спектър от сложни задачи." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 е най-новият модел, готов за производство, който предлага по-високо качество на изхода, особено в математически, дълги контексти и визуални задачи." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 предлага отлични способности за обработка на многомодални данни, предоставяйки по-голяма гъвкавост за разработка на приложения." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Моделът на езика TaiChu е с изключителни способности за разбиране на езика, текстово генериране, отговори на знания, програмиране, математически изчисления, логическо разсъждение, анализ на емоции, резюмиране на текст и др. Иновативно комбинира предварително обучение с големи данни и разнообразни източници на знания, чрез непрекъснато усъвършенстване на алгоритмичните технологии и усвояване на нови знания от масивни текстови данни, за да осигури на потребителите по-удобна информация и услуги, както и по-интелигентно изживяване." }, + "taichu_vqa": { + "description": "Taichu 2.0V обединява способности за разбиране на изображения, прехвърляне на знания, логическо обяснение и др., и се представя отлично в областта на въпросите и отговорите на текст и изображения." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) предлага подобрена изчислителна мощ чрез ефективни стратегии и архитектура на модела." }, diff --git a/locales/de-DE/chat.json b/locales/de-DE/chat.json index ccb00f0d4986..891721ba1715 100644 --- a/locales/de-DE/chat.json +++ b/locales/de-DE/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Hallo, ich bin **{{name}}**. Du kannst sofort mit mir sprechen oder zu den [Assistenteneinstellungen]({{url}}) gehen, um meine Informationen zu vervollständigen.", "agentDefaultMessageWithSystemRole": "Hallo, ich bin **{{name}}**, {{systemRole}}. Lass uns chatten!", "agentDefaultMessageWithoutEdit": "Hallo, ich bin **{{name}}**. Lassen Sie uns ins Gespräch kommen!", - "agentsAndConversations": "Agenten und Unterhaltungen", + "agents": "Assistent", "artifact": { "generating": "Wird generiert", "thinking": "Denken", @@ -81,7 +81,7 @@ }, "regenerate": "Neu generieren", "roleAndArchive": "Rolle und Archiv", - "searchAgentPlaceholder": "Assistenten und Unterhaltungen durchsuchen...", + "searchAgentPlaceholder": "Suchassistent...", "sendPlaceholder": "Chat-Nachricht eingeben...", "sessionGroup": { "config": "Gruppenkonfiguration", diff --git a/locales/de-DE/models.json b/locales/de-DE/models.json index 383bf120facc..bfa79a82e2dc 100644 --- a/locales/de-DE/models.json +++ b/locales/de-DE/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) ist ein innovatives Modell, das sich für Anwendungen in mehreren Bereichen und komplexe Aufgaben eignet." }, + "Max-32k": { + "description": "Spark Max 32K ist mit einer hohen Kontextverarbeitungsfähigkeit ausgestattet, die ein besseres Verständnis des Kontexts und eine stärkere logische Schlussfolgerung ermöglicht. Es unterstützt Texteingaben von bis zu 32K Tokens und eignet sich für Szenarien wie das Lesen langer Dokumente und private Wissensabfragen." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO ist eine hochflexible Multi-Modell-Kombination, die darauf abzielt, außergewöhnliche kreative Erlebnisse zu bieten." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 ist ein effizientes multimodales Modell, das eine breite Anwendbarkeit unterstützt." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 ist ein effizientes multimodales Modell, das eine breite Palette von Anwendungen unterstützt." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 ist für die Verarbeitung großangelegter Aufgabenszenarien konzipiert und bietet unvergleichliche Verarbeitungsgeschwindigkeit." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 ist das neueste experimentelle Modell, das in Text- und multimodalen Anwendungsfällen erhebliche Leistungsverbesserungen aufweist." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 bietet optimierte multimodale Verarbeitungsfähigkeiten, die für verschiedene komplexe Aufgabenszenarien geeignet sind." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 ist eine skalierbare multimodale KI-Lösung, die eine breite Palette komplexer Aufgaben unterstützt." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 ist das neueste produktionsbereite Modell, das eine höhere Ausgabequalität bietet, insbesondere bei mathematischen, langen Kontexten und visuellen Aufgaben erhebliche Verbesserungen aufweist." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 bietet hervorragende multimodale Verarbeitungsfähigkeiten und bringt mehr Flexibilität in die Anwendungsentwicklung." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Das Zīdōng Taichu Sprachmodell verfügt über außergewöhnliche Sprachverständnisfähigkeiten sowie Fähigkeiten in Textgenerierung, Wissensabfrage, Programmierung, mathematischen Berechnungen, logischem Denken, Sentimentanalyse und Textzusammenfassung. Es kombiniert innovativ große Datenvortrainings mit reichhaltigem Wissen aus mehreren Quellen, verfeinert kontinuierlich die Algorithmen und absorbiert ständig neues Wissen aus umfangreichen Textdaten in Bezug auf Vokabular, Struktur, Grammatik und Semantik, um die Leistung des Modells kontinuierlich zu verbessern. Es bietet den Nutzern bequemere Informationen und Dienstleistungen sowie ein intelligenteres Erlebnis." }, + "taichu_vqa": { + "description": "Taichu 2.0V vereint Fähigkeiten wie Bildverständnis, Wissensübertragung und logische Attribution und zeigt herausragende Leistungen im Bereich der Bild-Text-Fragen." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) bietet durch effiziente Strategien und Modellarchitekturen verbesserte Rechenfähigkeiten." }, diff --git a/locales/en-US/chat.json b/locales/en-US/chat.json index d6ba15d7e8a3..2d1dd4348cad 100644 --- a/locales/en-US/chat.json +++ b/locales/en-US/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Hello, I am **{{name}}**. You can start a conversation with me right away, or you can go to [Assistant Settings]({{url}}) to complete my information.", "agentDefaultMessageWithSystemRole": "Hello, I'm **{{name}}**, {{systemRole}}. Let's start chatting!", "agentDefaultMessageWithoutEdit": "Hello, I'm **{{name}}**, let's start chatting!", - "agentsAndConversations": "Assistants and Conversations", + "agents": "Assistants", "artifact": { "generating": "Generating", "thinking": "Thinking", @@ -81,7 +81,7 @@ }, "regenerate": "Regenerate", "roleAndArchive": "Role and Archive", - "searchAgentPlaceholder": "Search assistants and conversations...", + "searchAgentPlaceholder": "Search assistants...", "sendPlaceholder": "Type your message here...", "sessionGroup": { "config": "Group Management", diff --git a/locales/en-US/models.json b/locales/en-US/models.json index df47a33df229..46161be50607 100644 --- a/locales/en-US/models.json +++ b/locales/en-US/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) is an innovative model suitable for multi-domain applications and complex tasks." }, + "Max-32k": { + "description": "Spark Max 32K is equipped with enhanced context processing capabilities, stronger context understanding, and logical reasoning abilities, supporting text input of up to 32K tokens, suitable for scenarios such as long document reading and private knowledge Q&A." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO is a highly flexible multi-model fusion designed to provide an exceptional creative experience." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 is an efficient multimodal model that supports extensive application scaling." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 is an efficient multimodal model that supports a wide range of applications." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 is designed for handling large-scale task scenarios, providing unparalleled processing speed." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 is the latest experimental model, showcasing significant performance improvements in both text and multimodal use cases." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 offers optimized multimodal processing capabilities, suitable for a variety of complex task scenarios." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 is a scalable multimodal AI solution that supports a wide range of complex tasks." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 is the latest production-ready model, delivering higher quality outputs, with notable enhancements in mathematics, long-context, and visual tasks." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 offers excellent multimodal processing capabilities, providing greater flexibility for application development." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "The ZD Taichu language model possesses strong language understanding capabilities and excels in text creation, knowledge Q&A, code programming, mathematical calculations, logical reasoning, sentiment analysis, and text summarization. It innovatively combines large-scale pre-training with rich knowledge from multiple sources, continuously refining algorithmic techniques and absorbing new knowledge in vocabulary, structure, grammar, and semantics from vast text data, resulting in an evolving model performance. It provides users with more convenient information and services, as well as a more intelligent experience." }, + "taichu_vqa": { + "description": "Taichu 2.0V integrates capabilities such as image understanding, knowledge transfer, and logical reasoning, excelling in the field of image-text question answering." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) provides enhanced computational capabilities through efficient strategies and model architecture." }, diff --git a/locales/es-ES/chat.json b/locales/es-ES/chat.json index 5aadf260a72c..a1cc6020fab0 100644 --- a/locales/es-ES/chat.json +++ b/locales/es-ES/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Hola, soy **{{name}}**. Puedes comenzar a hablar conmigo de inmediato o ir a [Configuración del asistente]({{url}}) para completar mi información.", "agentDefaultMessageWithSystemRole": "Hola, soy **{{name}}**, {{systemRole}}, ¡comencemos a chatear!", "agentDefaultMessageWithoutEdit": "¡Hola, soy **{{name}}**! Comencemos nuestra conversación.", - "agentsAndConversations": "agentesYConversaciones", + "agents": "Asistente", "artifact": { "generating": "Generando", "thinking": "Pensando", @@ -81,7 +81,7 @@ }, "regenerate": "Regenerar", "roleAndArchive": "Rol y archivo", - "searchAgentPlaceholder": "Buscar asistentes y conversaciones...", + "searchAgentPlaceholder": "Asistente de búsqueda...", "sendPlaceholder": "Escribe tu mensaje...", "sessionGroup": { "config": "Gestión de grupos", diff --git a/locales/es-ES/models.json b/locales/es-ES/models.json index 8f1f0778a0b7..f6d8cee71dc1 100644 --- a/locales/es-ES/models.json +++ b/locales/es-ES/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) es un modelo innovador, adecuado para aplicaciones en múltiples campos y tareas complejas." }, + "Max-32k": { + "description": "Spark Max 32K está equipado con una gran capacidad de procesamiento de contexto, una comprensión de contexto más fuerte y habilidades de razonamiento lógico, soporta entradas de texto de 32K tokens, adecuado para la lectura de documentos largos, preguntas y respuestas de conocimiento privado y otros escenarios." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO es una fusión de múltiples modelos altamente flexible, diseñada para ofrecer una experiencia creativa excepcional." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 es un modelo multimodal eficiente, que admite la escalabilidad para aplicaciones amplias." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 es un modelo multimodal eficiente, que admite una amplia gama de aplicaciones." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 está diseñado para manejar escenarios de tareas a gran escala, ofreciendo una velocidad de procesamiento inigualable." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 es el último modelo experimental, con mejoras significativas en el rendimiento tanto en casos de uso de texto como multimodal." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 ofrece capacidades de procesamiento multimodal optimizadas, adecuadas para una variedad de escenarios de tareas complejas." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 es una solución de IA multimodal escalable, que admite una amplia gama de tareas complejas." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 es el último modelo listo para producción, que ofrece una calidad de salida superior, especialmente en tareas matemáticas, contextos largos y tareas visuales." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 ofrece una excelente capacidad de procesamiento multimodal, brindando mayor flexibilidad para el desarrollo de aplicaciones." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "El modelo de lenguaje Taichu de Zīdōng tiene una poderosa capacidad de comprensión del lenguaje, así como habilidades en creación de textos, preguntas y respuestas, programación de código, cálculos matemáticos, razonamiento lógico, análisis de sentimientos y resúmenes de texto. Combina de manera innovadora el preentrenamiento con grandes datos y un conocimiento rico de múltiples fuentes, perfeccionando continuamente la tecnología algorítmica y absorbiendo nuevos conocimientos en vocabulario, estructura, gramática y semántica de grandes volúmenes de datos textuales, logrando una evolución constante del modelo. Proporciona a los usuarios información y servicios más convenientes, así como una experiencia más inteligente." }, + "taichu_vqa": { + "description": "Taichu 2.0V combina capacidades de comprensión de imágenes, transferencia de conocimiento y atribución lógica, destacándose en el campo de preguntas y respuestas basadas en texto e imagen." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) proporciona una capacidad de cálculo mejorada a través de estrategias y arquitecturas de modelos eficientes." }, diff --git a/locales/fr-FR/chat.json b/locales/fr-FR/chat.json index 33498bfeef84..27f4ff974022 100644 --- a/locales/fr-FR/chat.json +++ b/locales/fr-FR/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Bonjour, je suis **{{name}}**, vous pouvez commencer à discuter avec moi immédiatement ou vous rendre dans [Paramètres de l'assistant]({{url}}) pour compléter mes informations.", "agentDefaultMessageWithSystemRole": "Bonjour, je suis **{{name}}**, {{systemRole}}. Commençons la conversation !", "agentDefaultMessageWithoutEdit": "Bonjour, je suis **{{name}}**. Commençons notre conversation !", - "agentsAndConversations": "Agents et conversations", + "agents": "Assistant", "artifact": { "generating": "Génération en cours", "thinking": "En réflexion", @@ -81,7 +81,7 @@ }, "regenerate": "Regénérer", "roleAndArchive": "Rôle et archivage", - "searchAgentPlaceholder": "Rechercher des agents et des conversations...", + "searchAgentPlaceholder": "Assistant de recherche...", "sendPlaceholder": "Saisissez votre message...", "sessionGroup": { "config": "Gestion des groupes", diff --git a/locales/fr-FR/models.json b/locales/fr-FR/models.json index ced79699400d..73147b23211b 100644 --- a/locales/fr-FR/models.json +++ b/locales/fr-FR/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) est un modèle innovant, adapté à des applications dans plusieurs domaines et à des tâches complexes." }, + "Max-32k": { + "description": "Spark Max 32K est équipé d'une grande capacité de traitement de contexte, offrant une meilleure compréhension du contexte et des capacités de raisonnement logique, prenant en charge des entrées textuelles de 32K tokens, adapté à la lecture de longs documents, aux questions-réponses sur des connaissances privées et à d'autres scénarios." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO est une fusion de modèles hautement flexible, visant à offrir une expérience créative exceptionnelle." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 est un modèle multimodal efficace, prenant en charge l'extension d'applications variées." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 est un modèle multimodal efficace, prenant en charge une large gamme d'applications." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 est conçu pour traiter des scénarios de tâches à grande échelle, offrant une vitesse de traitement inégalée." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 est le dernier modèle expérimental, offrant des améliorations significatives en termes de performance dans les cas d'utilisation textuels et multimodaux." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 offre des capacités de traitement multimodal optimisées, adaptées à divers scénarios de tâches complexes." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 est une solution d'IA multimodale extensible, prenant en charge une large gamme de tâches complexes." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 est le dernier modèle prêt pour la production, offrant une qualité de sortie supérieure, avec des améliorations notables dans les domaines des mathématiques, des contextes longs et des tâches visuelles." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 offre d'excellentes capacités de traitement multimodal, apportant une plus grande flexibilité au développement d'applications." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Le modèle de langage Taichu Zidong possède une forte capacité de compréhension linguistique ainsi que des compétences en création de texte, questions-réponses, programmation, calcul mathématique, raisonnement logique, analyse des sentiments, et résumé de texte. Il combine de manière innovante le pré-entraînement sur de grandes données avec des connaissances riches provenant de multiples sources, en perfectionnant continuellement la technologie algorithmique et en intégrant de nouvelles connaissances sur le vocabulaire, la structure, la grammaire et le sens à partir de vastes ensembles de données textuelles, offrant aux utilisateurs des informations et des services plus pratiques ainsi qu'une expérience plus intelligente." }, + "taichu_vqa": { + "description": "Taichu 2.0V intègre des capacités de compréhension d'image, de transfert de connaissances et d'attribution logique, se distinguant dans le domaine des questions-réponses textuelles et visuelles." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) offre une capacité de calcul améliorée grâce à des stratégies et une architecture de modèle efficaces." }, diff --git a/locales/it-IT/chat.json b/locales/it-IT/chat.json index 525ac6d9c011..8c166b850ee0 100644 --- a/locales/it-IT/chat.json +++ b/locales/it-IT/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Ciao, sono **{{name}}**, puoi iniziare subito a parlare con me oppure andare su [Impostazioni assistente]({{url}}) per completare le mie informazioni.", "agentDefaultMessageWithSystemRole": "Ciao, sono **{{name}}**, {{systemRole}}, iniziamo a chattare!", "agentDefaultMessageWithoutEdit": "Ciao, sono **{{name}}**. Cominciamo a chiacchierare!", - "agentsAndConversations": "Assistenti e Conversazioni", + "agents": "Assistente", "artifact": { "generating": "Generazione in corso", "thinking": "In fase di riflessione", @@ -81,7 +81,7 @@ }, "regenerate": "Rigenera", "roleAndArchive": "Ruolo e archivio", - "searchAgentPlaceholder": "Cerca assistente e conversazioni...", + "searchAgentPlaceholder": "Assistente di ricerca...", "sendPlaceholder": "Inserisci il testo della chat...", "sessionGroup": { "config": "Gestione gruppi", diff --git a/locales/it-IT/models.json b/locales/it-IT/models.json index b4cb9f381804..bc45405a255c 100644 --- a/locales/it-IT/models.json +++ b/locales/it-IT/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) è un modello innovativo, adatto per applicazioni in più settori e compiti complessi." }, + "Max-32k": { + "description": "Spark Max 32K è dotato di una grande capacità di elaborazione del contesto, con una comprensione e un ragionamento logico più potenti, supporta l'input di testo fino a 32K token, adatto per la lettura di documenti lunghi, domande e risposte su conoscenze private e altri scenari." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO è un modello altamente flessibile, progettato per offrire un'esperienza creativa eccezionale." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 è un modello multimodale efficiente, supporta l'espansione per applicazioni ampie." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 è un modello multimodale altamente efficiente, che supporta un'ampia gamma di applicazioni." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 è progettato per gestire scenari di compiti su larga scala, offrendo una velocità di elaborazione senza pari." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 è il modello sperimentale più recente, con miglioramenti significativi nelle prestazioni sia nei casi d'uso testuali che multimodali." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 offre capacità di elaborazione multimodale ottimizzate, adatte a vari scenari di compiti complessi." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 è una soluzione AI multimodale scalabile, supporta un'ampia gamma di compiti complessi." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 è il modello più recente pronto per la produzione, che offre output di qualità superiore, con miglioramenti significativi in particolare in matematica, contesti lunghi e compiti visivi." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 offre eccellenti capacità di elaborazione multimodale, fornendo maggiore flessibilità per lo sviluppo delle applicazioni." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Il modello linguistico Taichu di Zīdōng ha una straordinaria capacità di comprensione del linguaggio e abilità in creazione di testi, domande di conoscenza, programmazione, calcoli matematici, ragionamento logico, analisi del sentimento e sintesi di testi. Combina in modo innovativo il pre-addestramento su grandi dati con una ricca conoscenza multi-sorgente, affinando continuamente la tecnologia degli algoritmi e assorbendo costantemente nuove conoscenze da dati testuali massivi, migliorando continuamente le prestazioni del modello. Fornisce agli utenti informazioni e servizi più convenienti e un'esperienza più intelligente." }, + "taichu_vqa": { + "description": "Taichu 2.0V integra capacità di comprensione delle immagini, trasferimento di conoscenze e attribuzione logica, eccellendo nel campo delle domande e risposte basate su testo e immagini." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) offre capacità di calcolo potenziate attraverso strategie e architetture di modelli efficienti." }, diff --git a/locales/ja-JP/chat.json b/locales/ja-JP/chat.json index 34eb81b7c2bf..fafb0c2ce1f0 100644 --- a/locales/ja-JP/chat.json +++ b/locales/ja-JP/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "こんにちは、私は **{{name}}** です。すぐに私と会話を始めることもできますし、[アシスタント設定]({{url}}) に行って私の情報を充実させることもできます。", "agentDefaultMessageWithSystemRole": "こんにちは、私は **{{name}}** です、{{systemRole}}、さあ、チャットを始めましょう!", "agentDefaultMessageWithoutEdit": "こんにちは、私は**{{name}}**です。会話しましょう!", - "agentsAndConversations": "エージェントと会話", + "agents": "エージェント", "artifact": { "generating": "生成中", "thinking": "思考中", @@ -81,7 +81,7 @@ }, "regenerate": "再生成", "roleAndArchive": "役割とアーカイブ", - "searchAgentPlaceholder": "エージェントや会話を検索...", + "searchAgentPlaceholder": "検索アシスタント...", "sendPlaceholder": "チャット内容を入力してください...", "sessionGroup": { "config": "グループ設定", diff --git a/locales/ja-JP/models.json b/locales/ja-JP/models.json index 5cb01f0db825..c1a5c3ff20e1 100644 --- a/locales/ja-JP/models.json +++ b/locales/ja-JP/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B)は、革新的なモデルであり、多分野のアプリケーションや複雑なタスクに適しています。" }, + "Max-32k": { + "description": "Spark Max 32Kは、大規模なコンテキスト処理能力を備え、より強力なコンテキスト理解と論理推論能力を持ち、32Kトークンのテキスト入力をサポートします。長文書の読解やプライベートな知識に基づく質問応答などのシーンに適しています。" + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPOは非常に柔軟なマルチモデル統合で、卓越した創造的体験を提供することを目的としています。" }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001は、効率的なマルチモーダルモデルであり、幅広いアプリケーションの拡張をサポートします。" }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002は効率的なマルチモーダルモデルで、幅広いアプリケーションの拡張をサポートしています。" + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827は、大規模なタスクシナリオの処理のために設計されており、比類のない処理速度を提供します。" }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924は最新の実験モデルで、テキストおよびマルチモーダルのユースケースにおいて顕著な性能向上を実現しています。" + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827は、最適化されたマルチモーダル処理能力を提供し、さまざまな複雑なタスクシナリオに適用できます。" }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001は、拡張可能なマルチモーダルAIソリューションであり、幅広い複雑なタスクをサポートします。" }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002は最新の生産準備モデルで、特に数学、長いコンテキスト、視覚タスクにおいて質の高い出力を提供し、顕著な向上を見せています。" + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801は、優れたマルチモーダル処理能力を提供し、アプリケーション開発における柔軟性を高めます。" }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "紫東太初言語大モデルは、強力な言語理解能力とテキスト創作、知識問答、コードプログラミング、数学計算、論理推論、感情分析、テキスト要約などの能力を備えています。革新的に大データの事前学習と多源の豊富な知識を組み合わせ、アルゴリズム技術を継続的に磨き、膨大なテキストデータから語彙、構造、文法、意味などの新しい知識を吸収し、モデルの効果を進化させています。ユーザーにより便利な情報とサービス、よりインテリジェントな体験を提供します。" }, + "taichu_vqa": { + "description": "Taichu 2.0Vは画像理解、知識移転、論理的帰納などの能力を融合させており、テキストと画像の質問応答分野で優れたパフォーマンスを発揮しています。" + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B)は、高効率の戦略とモデルアーキテクチャを通じて、強化された計算能力を提供します。" }, diff --git a/locales/ko-KR/chat.json b/locales/ko-KR/chat.json index 19c7a9c3db2b..09cb0b0ce467 100644 --- a/locales/ko-KR/chat.json +++ b/locales/ko-KR/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "안녕하세요, 저는 **{{name}}**입니다. 지금 바로 저와 대화를 시작하시거나 [도우미 설정]({{url}})으로 가셔서 제 정보를 완성하실 수 있습니다.", "agentDefaultMessageWithSystemRole": "안녕하세요, 저는 **{{name}}**입니다. {{systemRole}}입니다. 대화를 시작해 봅시다!", "agentDefaultMessageWithoutEdit": "안녕하세요, 저는 **{{name}}**입니다. 대화를 시작해보세요!", - "agentsAndConversations": "에이전트 및 대화", + "agents": "도우미", "artifact": { "generating": "생성 중", "thinking": "생각 중", @@ -81,7 +81,7 @@ }, "regenerate": "재생성", "roleAndArchive": "역할 및 아카이브", - "searchAgentPlaceholder": "도우미 및 대화 검색...", + "searchAgentPlaceholder": "검색 도우미...", "sendPlaceholder": "채팅 내용 입력...", "sessionGroup": { "config": "그룹 설정", diff --git a/locales/ko-KR/models.json b/locales/ko-KR/models.json index 371873487453..cf0fd1a2c9a5 100644 --- a/locales/ko-KR/models.json +++ b/locales/ko-KR/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B)는 혁신적인 모델로, 다양한 분야의 응용과 복잡한 작업에 적합합니다." }, + "Max-32k": { + "description": "Spark Max 32K는 대규모 컨텍스트 처리 능력을 갖추고 있으며, 더 강력한 컨텍스트 이해 및 논리 추론 능력을 제공합니다. 32K 토큰의 텍스트 입력을 지원하며, 긴 문서 읽기, 개인 지식 질문 응답 등 다양한 상황에 적합합니다." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO는 뛰어난 창의적 경험을 제공하기 위해 설계된 고도로 유연한 다중 모델 통합입니다." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001은 효율적인 다중 모달 모델로, 광범위한 응용 프로그램 확장을 지원합니다." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002는 효율적인 다중 모달 모델로, 광범위한 응용 프로그램의 확장을 지원합니다." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827은 대규모 작업 시나리오 처리를 위해 설계되었으며, 비할 데 없는 처리 속도를 제공합니다." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924는 최신 실험 모델로, 텍스트 및 다중 모달 사용 사례에서 상당한 성능 향상을 보여줍니다." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827은 최적화된 다중 모달 처리 능력을 제공하며, 다양한 복잡한 작업 시나리오에 적합합니다." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001은 확장 가능한 다중 모달 AI 솔루션으로, 광범위한 복잡한 작업을 지원합니다." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002는 최신 생산 준비 모델로, 특히 수학, 긴 문맥 및 시각적 작업에서 더 높은 품질의 출력을 제공합니다." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801은 뛰어난 다중 모달 처리 능력을 제공하여 응용 프로그램 개발에 더 큰 유연성을 제공합니다." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "자이동 태초 언어 대모델은 뛰어난 언어 이해 능력과 텍스트 창작, 지식 질문 응답, 코드 프로그래밍, 수학 계산, 논리 추론, 감정 분석, 텍스트 요약 등의 능력을 갖추고 있습니다. 혁신적으로 대규모 데이터 사전 훈련과 다원적 풍부한 지식을 결합하여 알고리즘 기술을 지속적으로 다듬고, 방대한 텍스트 데이터에서 어휘, 구조, 문법, 의미 등의 새로운 지식을 지속적으로 흡수하여 모델 성능을 지속적으로 진화시킵니다. 사용자에게 보다 편리한 정보와 서비스, 그리고 더 지능적인 경험을 제공합니다." }, + "taichu_vqa": { + "description": "Taichu 2.0V는 이미지 이해, 지식 이전, 논리적 귀속 등의 능력을 통합하여, 텍스트와 이미지 질문 응답 분야에서 뛰어난 성능을 발휘합니다." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B)는 효율적인 전략과 모델 아키텍처를 통해 향상된 계산 능력을 제공합니다." }, diff --git a/locales/nl-NL/chat.json b/locales/nl-NL/chat.json index 5fd75ec8db6f..478299f274a8 100644 --- a/locales/nl-NL/chat.json +++ b/locales/nl-NL/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Hallo, ik ben **{{name}}**. Je kunt meteen met me beginnen praten, of je kunt naar [Assistentinstellingen]({{url}}) gaan om mijn informatie aan te vullen.", "agentDefaultMessageWithSystemRole": "Hallo, ik ben **{{name}}**, {{systemRole}}, laten we beginnen met praten!", "agentDefaultMessageWithoutEdit": "Hallo, ik ben **{{name}}**. Laten we beginnen met een gesprek!", - "agentsAndConversations": "agenten en gesprekken", + "agents": "Assistent", "artifact": { "generating": "Genereren", "thinking": "Denken", @@ -81,7 +81,7 @@ }, "regenerate": "Opnieuw genereren", "roleAndArchive": "Rol en archief", - "searchAgentPlaceholder": "Zoek assistenten en gesprekken...", + "searchAgentPlaceholder": "Zoekassistent...", "sendPlaceholder": "Voer chatbericht in...", "sessionGroup": { "config": "Groepsbeheer", diff --git a/locales/nl-NL/models.json b/locales/nl-NL/models.json index 7f9fd89e47ba..fdfccbc3bf8a 100644 --- a/locales/nl-NL/models.json +++ b/locales/nl-NL/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) is een innovatief model, geschikt voor toepassingen in meerdere domeinen en complexe taken." }, + "Max-32k": { + "description": "Spark Max 32K is uitgerust met een grote contextverwerkingscapaciteit, verbeterd begrip van context en logische redeneervaardigheden, ondersteunt tekstinvoer van 32K tokens, geschikt voor het lezen van lange documenten, privé kennisvragen en andere scenario's." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO is een zeer flexibele multi-model combinatie, ontworpen om een uitstekende creatieve ervaring te bieden." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 is een efficiënt multimodaal model dat ondersteuning biedt voor brede toepassingsuitbreiding." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 is een efficiënt multimodaal model dat ondersteuning biedt voor een breed scala aan toepassingen." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 is ontworpen voor het verwerken van grootschalige taakscenario's en biedt ongeëvenaarde verwerkingssnelheid." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 is het nieuwste experimentele model, met aanzienlijke prestatieverbeteringen in tekst- en multimodale toepassingen." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 biedt geoptimaliseerde multimodale verwerkingscapaciteiten, geschikt voor verschillende complexe taakscenario's." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 is een schaalbare multimodale AI-oplossing die ondersteuning biedt voor een breed scala aan complexe taken." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 is het nieuwste productieklare model, dat hogere kwaliteit output biedt, met name op het gebied van wiskunde, lange contexten en visuele taken." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 biedt uitstekende multimodale verwerkingscapaciteiten en biedt meer flexibiliteit voor applicatieontwikkeling." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Het Zido Tai Chu-taalmodel heeft een sterke taalbegripcapaciteit en kan tekstcreatie, kennisvragen, codeprogrammering, wiskundige berekeningen, logische redenering, sentimentanalyse, tekstsamenvattingen en meer aan. Het combineert innovatief grote data voortraining met rijke kennis uit meerdere bronnen, door algoritmische technologie continu te verfijnen en voortdurend nieuwe kennis op te nemen uit enorme tekstdata op het gebied van vocabulaire, structuur, grammatica en semantiek, waardoor de modelprestaties voortdurend evolueren. Het biedt gebruikers gemakkelijkere informatie en diensten en een meer intelligente ervaring." }, + "taichu_vqa": { + "description": "Taichu 2.0V combineert capaciteiten zoals beeldbegrip, kennisoverdracht en logische toerekening, en presteert uitstekend in het domein van beeld-tekst vraag en antwoord." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) biedt verbeterde rekenkracht door middel van efficiënte strategieën en modelarchitectuur." }, diff --git a/locales/pl-PL/chat.json b/locales/pl-PL/chat.json index 891ce02e31ed..fc5d47c3d19b 100644 --- a/locales/pl-PL/chat.json +++ b/locales/pl-PL/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Cześć, jestem **{{name}}**, możesz od razu rozpocząć ze mną rozmowę lub przejść do [ustawień asystenta]({{url}}), aby uzupełnić moje informacje.", "agentDefaultMessageWithSystemRole": "Cześć, jestem **{{name}}**, {{systemRole}}, zacznijmy rozmowę!", "agentDefaultMessageWithoutEdit": "Cześć, jestem **{{name}}**. Zacznijmy rozmowę!", - "agentsAndConversations": "Agenci i rozmowy", + "agents": "Asystent", "artifact": { "generating": "Generowanie", "thinking": "Myślenie", @@ -81,7 +81,7 @@ }, "regenerate": "Wygeneruj ponownie", "roleAndArchive": "Rola i archiwum", - "searchAgentPlaceholder": "Szukaj asystentów i rozmów...", + "searchAgentPlaceholder": "Wyszukaj pomocnika...", "sendPlaceholder": "Wpisz treść rozmowy...", "sessionGroup": { "config": "Zarządzanie grupami", diff --git a/locales/pl-PL/models.json b/locales/pl-PL/models.json index 019c37b79707..e0b3870bd5ba 100644 --- a/locales/pl-PL/models.json +++ b/locales/pl-PL/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) to innowacyjny model, idealny do zastosowań w wielu dziedzinach i złożonych zadań." }, + "Max-32k": { + "description": "Spark Max 32K ma dużą zdolność przetwarzania kontekstu, lepsze zrozumienie kontekstu i zdolności logicznego rozumowania, obsługując teksty o długości 32K tokenów, odpowiednie do czytania długich dokumentów, prywatnych pytań o wiedzę i innych scenariuszy." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO to wysoce elastyczna fuzja wielu modeli, mająca na celu zapewnienie doskonałego doświadczenia twórczego." }, @@ -44,6 +47,27 @@ "NousResearch/Nous-Hermes-2-Yi-34B": { "description": "Nous Hermes-2 Yi (34B) oferuje zoptymalizowane wyjście językowe i różnorodne możliwości zastosowania." }, + "Phi-3-5-mini-instruct": { + "description": "Odświeżona wersja modelu Phi-3-mini." + }, + "Phi-3-medium-128k-instruct": { + "description": "Ten sam model Phi-3-medium, ale z większym rozmiarem kontekstu do RAG lub kilku strzałowego wywoływania." + }, + "Phi-3-medium-4k-instruct": { + "description": "Model z 14 miliardami parametrów, oferujący lepszą jakość niż Phi-3-mini, z naciskiem na dane o wysokiej jakości i gęstości rozumowania." + }, + "Phi-3-mini-128k-instruct": { + "description": "Ten sam model Phi-3-mini, ale z większym rozmiarem kontekstu do RAG lub kilku strzałowego wywoływania." + }, + "Phi-3-mini-4k-instruct": { + "description": "Najmniejszy członek rodziny Phi-3. Zoptymalizowany zarówno pod kątem jakości, jak i niskiej latencji." + }, + "Phi-3-small-128k-instruct": { + "description": "Ten sam model Phi-3-small, ale z większym rozmiarem kontekstu do RAG lub kilku strzałowego wywoływania." + }, + "Phi-3-small-8k-instruct": { + "description": "Model z 7 miliardami parametrów, oferujący lepszą jakość niż Phi-3-mini, z naciskiem na dane o wysokiej jakości i gęstości rozumowania." + }, "Pro-128k": { "description": "Spark Pro-128K ma wyjątkową zdolność przetwarzania kontekstu, mogąc obsługiwać do 128K informacji kontekstowych, szczególnie odpowiedni do analizy całościowej i długoterminowego przetwarzania logicznego w długich tekstach, zapewniając płynne i spójne logicznie komunikowanie się oraz różnorodne wsparcie cytatów." }, @@ -56,6 +80,24 @@ "Qwen/Qwen2-72B-Instruct": { "description": "Qwen2 to zaawansowany uniwersalny model językowy, wspierający różne typy poleceń." }, + "Qwen/Qwen2.5-14B-Instruct": { + "description": "Qwen2.5 to nowa seria dużych modeli językowych, zaprojektowana w celu optymalizacji przetwarzania zadań instrukcyjnych." + }, + "Qwen/Qwen2.5-32B-Instruct": { + "description": "Qwen2.5 to nowa seria dużych modeli językowych, zaprojektowana w celu optymalizacji przetwarzania zadań instrukcyjnych." + }, + "Qwen/Qwen2.5-72B-Instruct": { + "description": "Qwen2.5 to nowa seria dużych modeli językowych, z silniejszymi zdolnościami rozumienia i generacji." + }, + "Qwen/Qwen2.5-7B-Instruct": { + "description": "Qwen2.5 to nowa seria dużych modeli językowych, zaprojektowana w celu optymalizacji przetwarzania zadań instrukcyjnych." + }, + "Qwen/Qwen2.5-Coder-7B-Instruct": { + "description": "Qwen2.5-Coder koncentruje się na pisaniu kodu." + }, + "Qwen/Qwen2.5-Math-72B-Instruct": { + "description": "Qwen2.5-Math koncentruje się na rozwiązywaniu problemów w dziedzinie matematyki, oferując profesjonalne odpowiedzi na trudne pytania." + }, "THUDM/glm-4-9b-chat": { "description": "GLM-4 9B to otwarta wersja, oferująca zoptymalizowane doświadczenie dialogowe dla aplikacji konwersacyjnych." }, @@ -131,6 +173,15 @@ "accounts/yi-01-ai/models/yi-large": { "description": "Model Yi-Large, oferujący doskonałe możliwości przetwarzania wielojęzycznego, nadający się do różnych zadań generowania i rozumienia języka." }, + "ai21-jamba-1.5-large": { + "description": "Model wielojęzyczny z 398 miliardami parametrów (94 miliardy aktywnych), oferujący okno kontekstowe o długości 256K, wywoływanie funkcji, strukturalne wyjście i generację opartą na kontekście." + }, + "ai21-jamba-1.5-mini": { + "description": "Model wielojęzyczny z 52 miliardami parametrów (12 miliardów aktywnych), oferujący okno kontekstowe o długości 256K, wywoływanie funkcji, strukturalne wyjście i generację opartą na kontekście." + }, + "ai21-jamba-instruct": { + "description": "Model LLM oparty na Mamba, zaprojektowany do osiągania najlepszej wydajności, jakości i efektywności kosztowej." + }, "anthropic.claude-3-5-sonnet-20240620-v1:0": { "description": "Claude 3.5 Sonnet podnosi standardy branżowe, przewyższając modele konkurencji oraz Claude 3 Opus, osiągając doskonałe wyniki w szerokim zakresie ocen, jednocześnie oferując szybkość i koszty na poziomie naszych modeli średniej klasy." }, @@ -227,6 +278,12 @@ "cognitivecomputations/dolphin-mixtral-8x22b": { "description": "Dolphin Mixtral 8x22B to model zaprojektowany do przestrzegania instrukcji, dialogów i programowania." }, + "cohere-command-r": { + "description": "Command R to skalowalny model generatywny, który koncentruje się na RAG i użyciu narzędzi, aby umożliwić AI na skalę produkcyjną dla przedsiębiorstw." + }, + "cohere-command-r-plus": { + "description": "Command R+ to model zoptymalizowany pod kątem RAG, zaprojektowany do obsługi obciążeń roboczych na poziomie przedsiębiorstwa." + }, "command-r": { "description": "Command R to LLM zoptymalizowany do dialogów i zadań z długim kontekstem, szczególnie odpowiedni do dynamicznej interakcji i zarządzania wiedzą." }, @@ -275,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 to wydajny model multimodalny, wspierający szerokie zastosowania." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 to wydajny model multimodalny, który wspiera szeroką gamę zastosowań." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 został zaprojektowany do obsługi dużych zadań, oferując niezrównaną prędkość przetwarzania." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 to najnowszy eksperymentalny model, który wykazuje znaczące poprawy wydajności w zastosowaniach tekstowych i multimodalnych." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 oferuje zoptymalizowane możliwości przetwarzania multimodalnego, odpowiednie do różnych złożonych scenariuszy zadań." }, @@ -287,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 to skalowalne rozwiązanie AI multimodalnego, wspierające szeroki zakres złożonych zadań." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 to najnowszy model gotowy do produkcji, oferujący wyższą jakość wyników, ze szczególnym uwzględnieniem zadań matematycznych, długich kontekstów i zadań wizualnych." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 oferuje doskonałe możliwości przetwarzania multimodalnego, zapewniając większą elastyczność w rozwoju aplikacji." }, @@ -434,6 +500,8 @@ "internlm/internlm2_5-7b-chat": { "description": "InternLM2.5 oferuje inteligentne rozwiązania dialogowe w różnych scenariuszach." }, + "jamba-1.5-large": {}, + "jamba-1.5-mini": {}, "llama-3.1-70b-instruct": { "description": "Model Llama 3.1 70B Instruct, z 70B parametrami, oferujący doskonałe osiągi w dużych zadaniach generowania tekstu i poleceń." }, @@ -497,6 +565,21 @@ "mathstral": { "description": "MathΣtral zaprojektowany do badań naukowych i wnioskowania matematycznego, oferujący efektywne możliwości obliczeniowe i interpretację wyników." }, + "meta-llama-3-70b-instruct": { + "description": "Potężny model z 70 miliardami parametrów, doskonały w rozumowaniu, kodowaniu i szerokich zastosowaniach językowych." + }, + "meta-llama-3-8b-instruct": { + "description": "Wszechstronny model z 8 miliardami parametrów, zoptymalizowany do zadań dialogowych i generacji tekstu." + }, + "meta-llama-3.1-405b-instruct": { + "description": "Modele tekstowe Llama 3.1 dostosowane do instrukcji, zoptymalizowane do wielojęzycznych przypadków użycia dialogowego, przewyższają wiele dostępnych modeli open source i zamkniętych w powszechnych benchmarkach branżowych." + }, + "meta-llama-3.1-70b-instruct": { + "description": "Modele tekstowe Llama 3.1 dostosowane do instrukcji, zoptymalizowane do wielojęzycznych przypadków użycia dialogowego, przewyższają wiele dostępnych modeli open source i zamkniętych w powszechnych benchmarkach branżowych." + }, + "meta-llama-3.1-8b-instruct": { + "description": "Modele tekstowe Llama 3.1 dostosowane do instrukcji, zoptymalizowane do wielojęzycznych przypadków użycia dialogowego, przewyższają wiele dostępnych modeli open source i zamkniętych w powszechnych benchmarkach branżowych." + }, "meta-llama/Llama-2-13b-chat-hf": { "description": "LLaMA-2 Chat (13B) oferuje doskonałe możliwości przetwarzania języka i znakomite doświadczenie interakcji." }, @@ -584,12 +667,21 @@ "mistral-large": { "description": "Mixtral Large to flagowy model Mistral, łączący zdolności generowania kodu, matematyki i wnioskowania, wspierający kontekst o długości 128k." }, + "mistral-large-2407": { + "description": "Mistral Large (2407) to zaawansowany model językowy (LLM) z najnowocześniejszymi zdolnościami rozumowania, wiedzy i kodowania." + }, "mistral-large-latest": { "description": "Mistral Large to flagowy model, doskonały w zadaniach wielojęzycznych, złożonym wnioskowaniu i generowaniu kodu, idealny do zaawansowanych zastosowań." }, "mistral-nemo": { "description": "Mistral Nemo, opracowany przez Mistral AI i NVIDIA, to model 12B o wysokiej wydajności." }, + "mistral-small": { + "description": "Mistral Small może być używany w każdym zadaniu opartym na języku, które wymaga wysokiej wydajności i niskiej latencji." + }, + "mistral-small-latest": { + "description": "Mistral Small to opcja o wysokiej efektywności kosztowej, szybka i niezawodna, odpowiednia do tłumaczeń, podsumowań i analizy sentymentu." + }, "mistralai/Mistral-7B-Instruct-v0.1": { "description": "Mistral (7B) Instruct jest znany z wysokiej wydajności, idealny do różnorodnych zadań językowych." }, @@ -677,9 +769,30 @@ "phi3:14b": { "description": "Phi-3 to lekki model otwarty wydany przez Microsoft, odpowiedni do efektywnej integracji i dużej skali wnioskowania wiedzy." }, + "pixtral-12b-2409": { + "description": "Model Pixtral wykazuje silne zdolności w zadaniach związanych z analizą wykresów i zrozumieniem obrazów, pytaniami dokumentowymi, wielomodalnym rozumowaniem i przestrzeganiem instrukcji, zdolny do przyjmowania obrazów w naturalnej rozdzielczości i proporcjach, a także do przetwarzania dowolnej liczby obrazów w długim oknie kontekstowym o długości do 128K tokenów." + }, + "qwen-coder-turbo-latest": { + "description": "Model kodowania Qwen." + }, "qwen-long": { "description": "Qwen to ultra-duży model językowy, który obsługuje długie konteksty tekstowe oraz funkcje dialogowe oparte na długich dokumentach i wielu dokumentach." }, + "qwen-math-plus-latest": { + "description": "Model matematyczny Qwen, stworzony specjalnie do rozwiązywania problemów matematycznych." + }, + "qwen-math-turbo-latest": { + "description": "Model matematyczny Qwen, stworzony specjalnie do rozwiązywania problemów matematycznych." + }, + "qwen-max-latest": { + "description": "Model językowy Qwen Max o skali miliardów parametrów, obsługujący różne języki, w tym chiński i angielski, będący API modelu za produktem Qwen 2.5." + }, + "qwen-plus-latest": { + "description": "Wzmocniona wersja modelu językowego Qwen Plus, obsługująca różne języki, w tym chiński i angielski." + }, + "qwen-turbo-latest": { + "description": "Model językowy Qwen Turbo, obsługujący różne języki, w tym chiński i angielski." + }, "qwen-vl-chat-v1": { "description": "Qwen VL obsługuje elastyczne interakcje, w tym wiele obrazów, wielokrotne pytania i odpowiedzi oraz zdolności twórcze." }, @@ -698,6 +811,33 @@ "qwen2": { "description": "Qwen2 to nowa generacja dużego modelu językowego Alibaba, wspierająca różnorodne potrzeby aplikacyjne dzięki doskonałej wydajności." }, + "qwen2.5-14b-instruct": { + "description": "Model Qwen 2.5 o skali 14B, udostępniony na zasadzie open source." + }, + "qwen2.5-32b-instruct": { + "description": "Model Qwen 2.5 o skali 32B, udostępniony na zasadzie open source." + }, + "qwen2.5-72b-instruct": { + "description": "Model Qwen 2.5 o skali 72B, udostępniony na zasadzie open source." + }, + "qwen2.5-7b-instruct": { + "description": "Model Qwen 2.5 o skali 7B, udostępniony na zasadzie open source." + }, + "qwen2.5-coder-1.5b-instruct": { + "description": "Otwarta wersja modelu kodowania Qwen." + }, + "qwen2.5-coder-7b-instruct": { + "description": "Otwarta wersja modelu kodowania Qwen." + }, + "qwen2.5-math-1.5b-instruct": { + "description": "Model Qwen-Math, który ma silne zdolności rozwiązywania problemów matematycznych." + }, + "qwen2.5-math-72b-instruct": { + "description": "Model Qwen-Math, który ma silne zdolności rozwiązywania problemów matematycznych." + }, + "qwen2.5-math-7b-instruct": { + "description": "Model Qwen-Math, który ma silne zdolności rozwiązywania problemów matematycznych." + }, "qwen2:0.5b": { "description": "Qwen2 to nowa generacja dużego modelu językowego Alibaba, wspierająca różnorodne potrzeby aplikacyjne dzięki doskonałej wydajności." }, @@ -743,6 +883,9 @@ "taichu_llm": { "description": "Model językowy TaiChu charakteryzuje się wyjątkową zdolnością rozumienia języka oraz umiejętnościami w zakresie tworzenia tekstów, odpowiadania na pytania, programowania, obliczeń matematycznych, wnioskowania logicznego, analizy emocji i streszczenia tekstu. Innowacyjnie łączy wstępne uczenie się na dużych zbiorach danych z bogatą wiedzą z wielu źródeł, stale doskonaląc technologię algorytmiczną i nieustannie przyswajając nową wiedzę z zakresu słownictwa, struktury, gramatyki i semantyki z ogromnych zbiorów danych tekstowych, co prowadzi do ciągłej ewolucji modelu. Umożliwia użytkownikom łatwiejszy dostęp do informacji i usług oraz bardziej inteligentne doświadczenia." }, + "taichu_vqa": { + "description": "Taichu 2.0V łączy zdolności rozumienia obrazów, transferu wiedzy i logicznego wnioskowania, osiągając znakomite wyniki w dziedzinie pytań i odpowiedzi na podstawie tekstu i obrazów." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) oferuje zwiększoną moc obliczeniową dzięki efektywnym strategiom i architekturze modelu." }, diff --git a/locales/pt-BR/chat.json b/locales/pt-BR/chat.json index b7425c6f5a3b..6258b73c11d4 100644 --- a/locales/pt-BR/chat.json +++ b/locales/pt-BR/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Olá, eu sou **{{name}}**, você pode começar a conversar comigo agora ou ir para [Configurações do Assistente]({{url}}) para completar minhas informações.", "agentDefaultMessageWithSystemRole": "Olá, eu sou **{{name}}**, {{systemRole}}, vamos conversar!", "agentDefaultMessageWithoutEdit": "Olá, sou o **{{name}}**, vamos começar a conversa!", - "agentsAndConversations": "Agentes e Conversas", + "agents": "Assistente", "artifact": { "generating": "Gerando", "thinking": "Pensando", @@ -81,7 +81,7 @@ }, "regenerate": "Regenerar", "roleAndArchive": "Função e Arquivo", - "searchAgentPlaceholder": "Pesquisar assistentes e conversas...", + "searchAgentPlaceholder": "Assistente de busca...", "sendPlaceholder": "Digite a mensagem...", "sessionGroup": { "config": "Gerenciar grupos", diff --git a/locales/pt-BR/models.json b/locales/pt-BR/models.json index cbeb260d3bca..a496c8d96212 100644 --- a/locales/pt-BR/models.json +++ b/locales/pt-BR/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) é um modelo inovador, adequado para aplicações em múltiplas áreas e tarefas complexas." }, + "Max-32k": { + "description": "O Spark Max 32K possui uma grande capacidade de processamento de contexto, com uma compreensão e raciocínio lógico mais robustos, suportando entradas de texto de 32K tokens, adequado para leitura de documentos longos, perguntas e respostas sobre conhecimento privado e outros cenários." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO é uma fusão de múltiplos modelos altamente flexível, projetada para oferecer uma experiência criativa excepcional." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 é um modelo multimodal eficiente, suportando a expansão de aplicações amplas." }, + "gemini-1.5-flash-002": { + "description": "O Gemini 1.5 Flash 002 é um modelo multimodal eficiente, que suporta uma ampla gama de aplicações." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 é projetado para lidar com cenários de tarefas em larga escala, oferecendo velocidade de processamento incomparável." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "O Gemini 1.5 Flash 8B 0924 é o mais recente modelo experimental, com melhorias significativas de desempenho em casos de uso de texto e multimídia." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 oferece capacidade de processamento multimodal otimizada, adequada para uma variedade de cenários de tarefas complexas." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 é uma solução de IA multimodal escalável, suportando uma ampla gama de tarefas complexas." }, + "gemini-1.5-pro-002": { + "description": "O Gemini 1.5 Pro 002 é o mais recente modelo pronto para produção, oferecendo saídas de maior qualidade, com melhorias significativas em tarefas matemáticas, contextos longos e tarefas visuais." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 oferece excelente capacidade de processamento multimodal, proporcionando maior flexibilidade para o desenvolvimento de aplicações." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "O modelo de linguagem Taichu possui uma forte capacidade de compreensão de linguagem, além de habilidades em criação de texto, perguntas e respostas, programação de código, cálculos matemáticos, raciocínio lógico, análise de sentimentos e resumo de texto. Inova ao combinar pré-treinamento com grandes dados e conhecimento rico de múltiplas fontes, aprimorando continuamente a tecnologia de algoritmos e absorvendo novos conhecimentos de vocabulário, estrutura, gramática e semântica de grandes volumes de dados textuais, proporcionando aos usuários informações e serviços mais convenientes e uma experiência mais inteligente." }, + "taichu_vqa": { + "description": "O Taichu 2.0V combina habilidades de compreensão de imagem, transferência de conhecimento e atribuição lógica, destacando-se no campo de perguntas e respostas baseadas em texto e imagem." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) oferece capacidade de computação aprimorada através de estratégias e arquiteturas de modelo eficientes." }, diff --git a/locales/ru-RU/chat.json b/locales/ru-RU/chat.json index 6af41335644b..b900f9ad1d10 100644 --- a/locales/ru-RU/chat.json +++ b/locales/ru-RU/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Здравствуйте, я **{{name}}**. Вы можете сразу начать со мной разговор или перейти в [настройки помощника]({{url}}), чтобы дополнить мою информацию.", "agentDefaultMessageWithSystemRole": "Привет, я **{{name}}**, {{systemRole}}. Давай начнем разговор!", "agentDefaultMessageWithoutEdit": "Привет, я **{{name}}**, давай начнём разговор!", - "agentsAndConversations": "Агенты и беседы", + "agents": "Ассистент", "artifact": { "generating": "Генерация", "thinking": "В процессе размышлений", @@ -81,7 +81,7 @@ }, "regenerate": "Сгенерировать заново", "roleAndArchive": "Роль и архив", - "searchAgentPlaceholder": "Поиск помощников и разговоров...", + "searchAgentPlaceholder": "Поиск помощника...", "sendPlaceholder": "Введите сообщение...", "sessionGroup": { "config": "Управление группами", diff --git a/locales/ru-RU/models.json b/locales/ru-RU/models.json index 181ec15788bf..1f1740c5a1df 100644 --- a/locales/ru-RU/models.json +++ b/locales/ru-RU/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) — это инновационная модель, подходящая для многообластных приложений и сложных задач." }, + "Max-32k": { + "description": "Spark Max 32K оснащен высокой способностью обработки контекста, улучшенным пониманием контекста и логическим выводом, поддерживает текстовый ввод до 32K токенов, подходит для чтения длинных документов, частных вопросов и ответов и других сценариев" + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO — это высокоадаптивная многомодельная комбинация, предназначенная для предоставления выдающегося творческого опыта." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 — это эффективная многомодальная модель, поддерживающая масштабирование для широкого спектра приложений." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 — это эффективная мультимодальная модель, поддерживающая расширенные применения." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 разработан для обработки масштабных задач, обеспечивая непревзойдённую скорость обработки." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 — это последняя экспериментальная модель, которая демонстрирует значительное улучшение производительности как в текстовых, так и в мультимодальных задачах." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 предлагает оптимизированные многомодальные возможности обработки, подходящие для различных сложных задач." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 — это масштабируемое решение для многомодального ИИ, поддерживающее широкий спектр сложных задач." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 — это последняя модель, готовая к производству, которая обеспечивает более высокое качество вывода, особенно в математических задачах, длинных контекстах и визуальных задачах." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 предлагает выдающиеся возможности многомодальной обработки, обеспечивая большую гибкость для разработки приложений." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Модель языка TaiChu обладает выдающимися способностями к пониманию языка, а также к созданию текстов, ответам на вопросы, программированию, математическим вычислениям, логическому выводу, анализу эмоций и резюмированию текстов. Инновационно сочетает предобучение на больших данных с богатством многопоточных знаний, постоянно совершенствуя алгоритмические технологии и поглощая новые знания о словах, структуре, грамматике и семантике из огромных объемов текстовых данных, обеспечивая пользователям более удобную информацию и услуги, а также более интеллектуальный опыт." }, + "taichu_vqa": { + "description": "Taichu 2.0V объединяет возможности понимания изображений, передачи знаний, логического вывода и других, демонстрируя выдающиеся результаты в области вопросов и ответов на основе текста и изображений." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) обеспечивает повышенные вычислительные возможности благодаря эффективным стратегиям и архитектуре модели." }, diff --git a/locales/tr-TR/chat.json b/locales/tr-TR/chat.json index 3128f4a671b5..4fdb187fd278 100644 --- a/locales/tr-TR/chat.json +++ b/locales/tr-TR/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Merhaba, ben **{{name}}**. Hemen benimle konuşmaya başlayabilir veya [Asistan Ayarları]({{url}}) sayfasına giderek bilgilerimi güncelleyebilirsin.", "agentDefaultMessageWithSystemRole": "Merhaba, Ben **{{name}}**, {{systemRole}}. Hemen sohbet etmeye başlayalım!", "agentDefaultMessageWithoutEdit": "Merhaba, ben **{{name}}**. Konuşmaya başlayalım!", - "agentsAndConversations": "Ajanlar ve Konuşmalar", + "agents": "Asistan", "artifact": { "generating": "Üretiliyor", "thinking": "Düşünülüyor", @@ -81,7 +81,7 @@ }, "regenerate": "Tekrarla", "roleAndArchive": "Rol ve Arşiv", - "searchAgentPlaceholder": "Arama yardımcıları ve konuşmalar...", + "searchAgentPlaceholder": "Arama Asistanı...", "sendPlaceholder": "Mesajınızı buraya yazın...", "sessionGroup": { "config": "Grup Yönetimi", diff --git a/locales/tr-TR/models.json b/locales/tr-TR/models.json index 466882f0e50a..3490407b907e 100644 --- a/locales/tr-TR/models.json +++ b/locales/tr-TR/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B), çok alanlı uygulamalar ve karmaşık görevler için uygun yenilikçi bir modeldir." }, + "Max-32k": { + "description": "Spark Max 32K, büyük bağlam işleme yeteneği, daha güçlü bağlam anlama ve mantıksal akıl yürütme yeteneği ile donatılmıştır. 32K token'lık metin girişi destekler ve uzun belgelerin okunması, özel bilgi sorgulamaları gibi senaryolar için uygundur." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO, olağanüstü yaratıcı deneyimler sunmak için tasarlanmış son derece esnek bir çoklu model birleşimidir." }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001, geniş uygulama alanları için destekleyen verimli bir çok modlu modeldir." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002, geniş uygulama yelpazesini destekleyen verimli bir çok modlu modeldir." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827, büyük ölçekli görev senaryolarını işlemek için tasarlanmış, eşsiz bir işleme hızı sunar." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924, metin ve çok modlu kullanım durumlarında önemli performans artışları sunan en son deneysel modeldir." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827, çeşitli karmaşık görev senaryoları için optimize edilmiş çok modlu işleme yeteneği sunar." }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001, geniş karmaşık görevleri destekleyen ölçeklenebilir bir çok modlu AI çözümüdür." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002, daha yüksek kaliteli çıktılar sunan en son üretim hazır modeldir; özellikle matematik, uzun bağlam ve görsel görevlerde önemli iyileştirmeler sağlamaktadır." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801, mükemmel çok modlu işleme yeteneği sunar ve uygulama geliştirmeye daha fazla esneklik kazandırır." }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "Zidong Taichu dil büyük modeli, güçlü dil anlama yeteneği ile metin oluşturma, bilgi sorgulama, kod programlama, matematik hesaplama, mantıksal akıl yürütme, duygu analizi, metin özeti gibi yeteneklere sahiptir. Yenilikçi bir şekilde büyük veri ön eğitimi ile çok kaynaklı zengin bilgiyi birleştirir, algoritma teknolojisini sürekli olarak geliştirir ve büyük metin verilerinden kelime, yapı, dil bilgisi, anlam gibi yeni bilgileri sürekli olarak edinir, modelin performansını sürekli olarak evrimleştirir. Kullanıcılara daha kolay bilgi ve hizmetler sunar ve daha akıllı bir deneyim sağlar." }, + "taichu_vqa": { + "description": "Taichu 2.0V, görüntü anlama, bilgi aktarımı, mantıksal çıkarım gibi yetenekleri birleştirerek, metin ve görsel soru-cevap alanında öne çıkmaktadır." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B), etkili stratejiler ve model mimarisi ile artırılmış hesaplama yetenekleri sunar." }, diff --git a/locales/vi-VN/chat.json b/locales/vi-VN/chat.json index d6dfe611ec3c..f36945b2dd72 100644 --- a/locales/vi-VN/chat.json +++ b/locales/vi-VN/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "Xin chào, tôi là **{{name}}**, bạn có thể bắt đầu trò chuyện với tôi ngay bây giờ, hoặc bạn có thể đến [Cài đặt trợ lý]({{url}}) để hoàn thiện thông tin của tôi.", "agentDefaultMessageWithSystemRole": "Xin chào, tôi là **{{name}}**, {{systemRole}}. Hãy bắt đầu trò chuyện ngay!", "agentDefaultMessageWithoutEdit": "Xin chào, tôi là **{{name}}**, chúng ta hãy bắt đầu trò chuyện nào!", - "agentsAndConversations": "Người hỗ trợ và cuộc trò chuyện", + "agents": "Trợ lý", "artifact": { "generating": "Đang tạo", "thinking": "Đang suy nghĩ", @@ -81,7 +81,7 @@ }, "regenerate": "Tạo lại", "roleAndArchive": "Vai trò và lưu trữ", - "searchAgentPlaceholder": "Tìm kiếm trợ lý và cuộc trò chuyện...", + "searchAgentPlaceholder": "Trợ lý tìm kiếm...", "sendPlaceholder": "Nhập nội dung trò chuyện...", "sessionGroup": { "config": "Quản lý nhóm", diff --git a/locales/vi-VN/models.json b/locales/vi-VN/models.json index 75fe144d5694..96bab9d7ee8b 100644 --- a/locales/vi-VN/models.json +++ b/locales/vi-VN/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) là một mô hình sáng tạo, phù hợp cho nhiều lĩnh vực ứng dụng và nhiệm vụ phức tạp." }, + "Max-32k": { + "description": "Spark Max 32K được cấu hình với khả năng xử lý ngữ cảnh lớn, khả năng hiểu ngữ cảnh và lý luận logic mạnh mẽ hơn, hỗ trợ đầu vào văn bản 32K token, phù hợp cho việc đọc tài liệu dài, hỏi đáp kiến thức riêng tư và các tình huống khác." + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO là một mô hình kết hợp đa dạng, nhằm cung cấp trải nghiệm sáng tạo xuất sắc." }, @@ -44,6 +47,27 @@ "NousResearch/Nous-Hermes-2-Yi-34B": { "description": "Nous Hermes-2 Yi (34B) cung cấp đầu ra ngôn ngữ tối ưu và khả năng ứng dụng đa dạng." }, + "Phi-3-5-mini-instruct": { + "description": "Cập nhật mô hình Phi-3-mini." + }, + "Phi-3-medium-128k-instruct": { + "description": "Mô hình Phi-3-medium giống nhau, nhưng với kích thước ngữ cảnh lớn hơn cho RAG hoặc gợi ý ít." + }, + "Phi-3-medium-4k-instruct": { + "description": "Mô hình 14B tham số, chứng minh chất lượng tốt hơn Phi-3-mini, tập trung vào dữ liệu dày đặc lý luận chất lượng cao." + }, + "Phi-3-mini-128k-instruct": { + "description": "Mô hình Phi-3-mini giống nhau, nhưng với kích thước ngữ cảnh lớn hơn cho RAG hoặc gợi ý ít." + }, + "Phi-3-mini-4k-instruct": { + "description": "Thành viên nhỏ nhất của gia đình Phi-3. Tối ưu hóa cho cả chất lượng và độ trễ thấp." + }, + "Phi-3-small-128k-instruct": { + "description": "Mô hình Phi-3-small giống nhau, nhưng với kích thước ngữ cảnh lớn hơn cho RAG hoặc gợi ý ít." + }, + "Phi-3-small-8k-instruct": { + "description": "Mô hình 7B tham số, chứng minh chất lượng tốt hơn Phi-3-mini, tập trung vào dữ liệu dày đặc lý luận chất lượng cao." + }, "Pro-128k": { "description": "Spark Pro-128K được cấu hình với khả năng xử lý ngữ cảnh cực lớn, có thể xử lý tới 128K thông tin ngữ cảnh, đặc biệt phù hợp cho việc phân tích toàn bộ và xử lý mối liên hệ logic lâu dài trong nội dung văn bản dài, có thể cung cấp logic mạch lạc và hỗ trợ trích dẫn đa dạng trong giao tiếp văn bản phức tạp." }, @@ -56,6 +80,24 @@ "Qwen/Qwen2-72B-Instruct": { "description": "Qwen2 là mô hình ngôn ngữ tổng quát tiên tiến, hỗ trợ nhiều loại chỉ dẫn." }, + "Qwen/Qwen2.5-14B-Instruct": { + "description": "Qwen2.5 là một loạt mô hình ngôn ngữ lớn hoàn toàn mới, nhằm tối ưu hóa việc xử lý các nhiệm vụ theo hướng dẫn." + }, + "Qwen/Qwen2.5-32B-Instruct": { + "description": "Qwen2.5 là một loạt mô hình ngôn ngữ lớn hoàn toàn mới, nhằm tối ưu hóa việc xử lý các nhiệm vụ theo hướng dẫn." + }, + "Qwen/Qwen2.5-72B-Instruct": { + "description": "Qwen2.5 là một loạt mô hình ngôn ngữ lớn hoàn toàn mới, có khả năng hiểu và tạo ra mạnh mẽ hơn." + }, + "Qwen/Qwen2.5-7B-Instruct": { + "description": "Qwen2.5 là một loạt mô hình ngôn ngữ lớn hoàn toàn mới, nhằm tối ưu hóa việc xử lý các nhiệm vụ theo hướng dẫn." + }, + "Qwen/Qwen2.5-Coder-7B-Instruct": { + "description": "Qwen2.5-Coder tập trung vào việc viết mã." + }, + "Qwen/Qwen2.5-Math-72B-Instruct": { + "description": "Qwen2.5-Math tập trung vào việc giải quyết các vấn đề trong lĩnh vực toán học, cung cấp giải pháp chuyên nghiệp cho các bài toán khó." + }, "THUDM/glm-4-9b-chat": { "description": "GLM-4 9B là phiên bản mã nguồn mở, cung cấp trải nghiệm đối thoại tối ưu cho các ứng dụng hội thoại." }, @@ -131,6 +173,15 @@ "accounts/yi-01-ai/models/yi-large": { "description": "Mô hình Yi-Large, có khả năng xử lý đa ngôn ngữ xuất sắc, có thể được sử dụng cho nhiều nhiệm vụ sinh và hiểu ngôn ngữ." }, + "ai21-jamba-1.5-large": { + "description": "Mô hình đa ngôn ngữ với 398B tham số (94B hoạt động), cung cấp cửa sổ ngữ cảnh dài 256K, gọi hàm, đầu ra có cấu trúc và tạo ra nội dung có căn cứ." + }, + "ai21-jamba-1.5-mini": { + "description": "Mô hình đa ngôn ngữ với 52B tham số (12B hoạt động), cung cấp cửa sổ ngữ cảnh dài 256K, gọi hàm, đầu ra có cấu trúc và tạo ra nội dung có căn cứ." + }, + "ai21-jamba-instruct": { + "description": "Mô hình LLM dựa trên Mamba đạt hiệu suất, chất lượng và hiệu quả chi phí tốt nhất trong ngành." + }, "anthropic.claude-3-5-sonnet-20240620-v1:0": { "description": "Claude 3.5 Sonnet nâng cao tiêu chuẩn ngành, hiệu suất vượt trội hơn các mô hình cạnh tranh và Claude 3 Opus, thể hiện xuất sắc trong nhiều đánh giá, đồng thời có tốc độ và chi phí của mô hình tầm trung của chúng tôi." }, @@ -227,6 +278,12 @@ "cognitivecomputations/dolphin-mixtral-8x22b": { "description": "Dolphin Mixtral 8x22B là mô hình được thiết kế cho việc tuân thủ hướng dẫn, đối thoại và lập trình." }, + "cohere-command-r": { + "description": "Command R là một mô hình sinh tạo có thể mở rộng, nhắm đến RAG và Sử dụng Công cụ để cho phép AI quy mô sản xuất cho doanh nghiệp." + }, + "cohere-command-r-plus": { + "description": "Command R+ là mô hình tối ưu hóa RAG hiện đại, được thiết kế để xử lý khối lượng công việc cấp doanh nghiệp." + }, "command-r": { "description": "Command R là LLM được tối ưu hóa cho các nhiệm vụ đối thoại và ngữ cảnh dài, đặc biệt phù hợp cho tương tác động và quản lý kiến thức." }, @@ -275,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 là một mô hình đa phương thức hiệu quả, hỗ trợ mở rộng cho nhiều ứng dụng." }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 là một mô hình đa phương thức hiệu quả, hỗ trợ mở rộng cho nhiều ứng dụng." + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 được thiết kế để xử lý các tình huống nhiệm vụ quy mô lớn, cung cấp tốc độ xử lý vô song." }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 là mô hình thử nghiệm mới nhất, có sự cải thiện đáng kể về hiệu suất trong các trường hợp sử dụng văn bản và đa phương thức." + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 cung cấp khả năng xử lý đa phương thức được tối ưu hóa, phù hợp cho nhiều tình huống nhiệm vụ phức tạp." }, @@ -287,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 là giải pháp AI đa phương thức có thể mở rộng, hỗ trợ nhiều nhiệm vụ phức tạp." }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 là mô hình sẵn sàng cho sản xuất mới nhất, cung cấp đầu ra chất lượng cao hơn, đặc biệt là trong các nhiệm vụ toán học, ngữ cảnh dài và thị giác." + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 cung cấp khả năng xử lý đa phương thức xuất sắc, mang lại sự linh hoạt lớn hơn cho phát triển ứng dụng." }, @@ -434,6 +500,8 @@ "internlm/internlm2_5-7b-chat": { "description": "InternLM2.5 cung cấp giải pháp đối thoại thông minh cho nhiều tình huống." }, + "jamba-1.5-large": {}, + "jamba-1.5-mini": {}, "llama-3.1-70b-instruct": { "description": "Mô hình Llama 3.1 70B Instruct, có 70B tham số, có thể cung cấp hiệu suất xuất sắc trong các nhiệm vụ sinh văn bản và chỉ dẫn lớn." }, @@ -497,6 +565,21 @@ "mathstral": { "description": "MathΣtral được thiết kế cho nghiên cứu khoa học và suy luận toán học, cung cấp khả năng tính toán hiệu quả và giải thích kết quả." }, + "meta-llama-3-70b-instruct": { + "description": "Mô hình 70 tỷ tham số mạnh mẽ, xuất sắc trong lý luận, lập trình và các ứng dụng ngôn ngữ rộng lớn." + }, + "meta-llama-3-8b-instruct": { + "description": "Mô hình 8 tỷ tham số đa năng, tối ưu hóa cho các tác vụ đối thoại và tạo văn bản." + }, + "meta-llama-3.1-405b-instruct": { + "description": "Các mô hình văn bản chỉ được tinh chỉnh theo hướng dẫn Llama 3.1 được tối ưu hóa cho các trường hợp sử dụng đối thoại đa ngôn ngữ và vượt trội hơn nhiều mô hình trò chuyện mã nguồn mở và đóng có sẵn trên các tiêu chuẩn ngành phổ biến." + }, + "meta-llama-3.1-70b-instruct": { + "description": "Các mô hình văn bản chỉ được tinh chỉnh theo hướng dẫn Llama 3.1 được tối ưu hóa cho các trường hợp sử dụng đối thoại đa ngôn ngữ và vượt trội hơn nhiều mô hình trò chuyện mã nguồn mở và đóng có sẵn trên các tiêu chuẩn ngành phổ biến." + }, + "meta-llama-3.1-8b-instruct": { + "description": "Các mô hình văn bản chỉ được tinh chỉnh theo hướng dẫn Llama 3.1 được tối ưu hóa cho các trường hợp sử dụng đối thoại đa ngôn ngữ và vượt trội hơn nhiều mô hình trò chuyện mã nguồn mở và đóng có sẵn trên các tiêu chuẩn ngành phổ biến." + }, "meta-llama/Llama-2-13b-chat-hf": { "description": "LLaMA-2 Chat (13B) cung cấp khả năng xử lý ngôn ngữ xuất sắc và trải nghiệm tương tác tuyệt vời." }, @@ -584,12 +667,21 @@ "mistral-large": { "description": "Mixtral Large là mô hình hàng đầu của Mistral, kết hợp khả năng sinh mã, toán học và suy luận, hỗ trợ cửa sổ ngữ cảnh 128k." }, + "mistral-large-2407": { + "description": "Mistral Large (2407) là một Mô hình Ngôn ngữ Lớn (LLM) tiên tiến với khả năng lý luận, kiến thức và lập trình hiện đại." + }, "mistral-large-latest": { "description": "Mistral Large là mô hình lớn hàng đầu, chuyên về các nhiệm vụ đa ngôn ngữ, suy luận phức tạp và sinh mã, là lựa chọn lý tưởng cho các ứng dụng cao cấp." }, "mistral-nemo": { "description": "Mistral Nemo được phát triển hợp tác giữa Mistral AI và NVIDIA, là mô hình 12B hiệu suất cao." }, + "mistral-small": { + "description": "Mistral Small có thể được sử dụng cho bất kỳ nhiệm vụ nào dựa trên ngôn ngữ yêu cầu hiệu suất cao và độ trễ thấp." + }, + "mistral-small-latest": { + "description": "Mistral Small là lựa chọn hiệu quả về chi phí, nhanh chóng và đáng tin cậy, phù hợp cho các trường hợp như dịch thuật, tóm tắt và phân tích cảm xúc." + }, "mistralai/Mistral-7B-Instruct-v0.1": { "description": "Mistral (7B) Instruct nổi bật với hiệu suất cao, phù hợp cho nhiều nhiệm vụ ngôn ngữ." }, @@ -677,9 +769,30 @@ "phi3:14b": { "description": "Phi-3 là mô hình mở nhẹ do Microsoft phát hành, phù hợp cho việc tích hợp hiệu quả và suy luận kiến thức quy mô lớn." }, + "pixtral-12b-2409": { + "description": "Mô hình Pixtral thể hiện khả năng mạnh mẽ trong các nhiệm vụ như hiểu biểu đồ và hình ảnh, hỏi đáp tài liệu, suy luận đa phương tiện và tuân thủ hướng dẫn, có khả năng tiếp nhận hình ảnh với độ phân giải và tỷ lệ khung hình tự nhiên, cũng như xử lý bất kỳ số lượng hình ảnh nào trong cửa sổ ngữ cảnh dài lên đến 128K token." + }, + "qwen-coder-turbo-latest": { + "description": "Mô hình mã Qwen." + }, "qwen-long": { "description": "Mô hình ngôn ngữ quy mô lớn Qwen, hỗ trợ ngữ cảnh văn bản dài và chức năng đối thoại dựa trên tài liệu dài, nhiều tài liệu." }, + "qwen-math-plus-latest": { + "description": "Mô hình toán học Qwen được thiết kế đặc biệt để giải quyết các bài toán toán học." + }, + "qwen-math-turbo-latest": { + "description": "Mô hình toán học Qwen được thiết kế đặc biệt để giải quyết các bài toán toán học." + }, + "qwen-max-latest": { + "description": "Mô hình ngôn ngữ quy mô lớn Qwen với hàng trăm tỷ tham số, hỗ trợ đầu vào bằng tiếng Trung, tiếng Anh và nhiều ngôn ngữ khác, là mô hình API đứng sau phiên bản sản phẩm Qwen 2.5 hiện tại." + }, + "qwen-plus-latest": { + "description": "Phiên bản nâng cao của mô hình ngôn ngữ quy mô lớn Qwen, hỗ trợ đầu vào bằng tiếng Trung, tiếng Anh và nhiều ngôn ngữ khác." + }, + "qwen-turbo-latest": { + "description": "Mô hình ngôn ngữ quy mô lớn Qwen, hỗ trợ đầu vào bằng tiếng Trung, tiếng Anh và nhiều ngôn ngữ khác." + }, "qwen-vl-chat-v1": { "description": "Mô hình Qwen VL hỗ trợ các phương thức tương tác linh hoạt, bao gồm nhiều hình ảnh, nhiều vòng hỏi đáp, sáng tạo, v.v." }, @@ -698,6 +811,33 @@ "qwen2": { "description": "Qwen2 là mô hình ngôn ngữ quy mô lớn thế hệ mới của Alibaba, hỗ trợ các nhu cầu ứng dụng đa dạng với hiệu suất xuất sắc." }, + "qwen2.5-14b-instruct": { + "description": "Mô hình 14B quy mô mở nguồn của Qwen 2.5." + }, + "qwen2.5-32b-instruct": { + "description": "Mô hình 32B quy mô mở nguồn của Qwen 2.5." + }, + "qwen2.5-72b-instruct": { + "description": "Mô hình 72B quy mô mở nguồn của Qwen 2.5." + }, + "qwen2.5-7b-instruct": { + "description": "Mô hình 7B quy mô mở nguồn của Qwen 2.5." + }, + "qwen2.5-coder-1.5b-instruct": { + "description": "Phiên bản mã nguồn mở của mô hình mã Qwen." + }, + "qwen2.5-coder-7b-instruct": { + "description": "Phiên bản mã nguồn mở của mô hình mã Qwen." + }, + "qwen2.5-math-1.5b-instruct": { + "description": "Mô hình Qwen-Math có khả năng giải quyết bài toán toán học mạnh mẽ." + }, + "qwen2.5-math-72b-instruct": { + "description": "Mô hình Qwen-Math có khả năng giải quyết bài toán toán học mạnh mẽ." + }, + "qwen2.5-math-7b-instruct": { + "description": "Mô hình Qwen-Math có khả năng giải quyết bài toán toán học mạnh mẽ." + }, "qwen2:0.5b": { "description": "Qwen2 là mô hình ngôn ngữ quy mô lớn thế hệ mới của Alibaba, hỗ trợ các nhu cầu ứng dụng đa dạng với hiệu suất xuất sắc." }, @@ -743,6 +883,9 @@ "taichu_llm": { "description": "Mô hình ngôn ngữ lớn Taichu có khả năng hiểu ngôn ngữ mạnh mẽ và các khả năng như sáng tạo văn bản, trả lời câu hỏi kiến thức, lập trình mã, tính toán toán học, suy luận logic, phân tích cảm xúc, tóm tắt văn bản. Đổi mới kết hợp giữa đào tạo trước với dữ liệu phong phú từ nhiều nguồn, thông qua việc liên tục cải tiến công nghệ thuật toán và hấp thụ kiến thức mới từ dữ liệu văn bản khổng lồ, giúp mô hình ngày càng hoàn thiện. Cung cấp thông tin và dịch vụ tiện lợi hơn cho người dùng cùng trải nghiệm thông minh hơn." }, + "taichu_vqa": { + "description": "Taichu 2.0V kết hợp khả năng hiểu hình ảnh, chuyển giao kiến thức, suy luận logic, v.v., thể hiện xuất sắc trong lĩnh vực hỏi đáp hình ảnh và văn bản." + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) cung cấp khả năng tính toán nâng cao thông qua chiến lược và kiến trúc mô hình hiệu quả." }, diff --git a/locales/vi-VN/providers.json b/locales/vi-VN/providers.json index 87bb933504a3..be0e51f528b0 100644 --- a/locales/vi-VN/providers.json +++ b/locales/vi-VN/providers.json @@ -1,4 +1,5 @@ { + "ai21": {}, "ai360": { "description": "360 AI là nền tảng mô hình và dịch vụ AI do công ty 360 phát hành, cung cấp nhiều mô hình xử lý ngôn ngữ tự nhiên tiên tiến, bao gồm 360GPT2 Pro, 360GPT Pro, 360GPT Turbo và 360GPT Turbo Responsibility 8K. Những mô hình này kết hợp giữa tham số quy mô lớn và khả năng đa phương thức, được ứng dụng rộng rãi trong tạo văn bản, hiểu ngữ nghĩa, hệ thống đối thoại và tạo mã. Thông qua chiến lược giá linh hoạt, 360 AI đáp ứng nhu cầu đa dạng của người dùng, hỗ trợ nhà phát triển tích hợp, thúc đẩy sự đổi mới và phát triển ứng dụng thông minh." }, @@ -20,6 +21,9 @@ "fireworksai": { "description": "Fireworks AI là nhà cung cấp dịch vụ mô hình ngôn ngữ cao cấp hàng đầu, tập trung vào gọi chức năng và xử lý đa phương thức. Mô hình mới nhất của họ, Firefunction V2, dựa trên Llama-3, được tối ưu hóa cho gọi chức năng, đối thoại và tuân theo chỉ dẫn. Mô hình ngôn ngữ hình ảnh FireLLaVA-13B hỗ trợ đầu vào hỗn hợp hình ảnh và văn bản. Các mô hình đáng chú ý khác bao gồm dòng Llama và dòng Mixtral, cung cấp hỗ trợ cho việc tuân theo và tạo ra chỉ dẫn đa ngôn ngữ hiệu quả." }, + "github": { + "description": "Với GitHub Models, các nhà phát triển có thể trở thành kỹ sư AI và xây dựng với các mô hình AI hàng đầu trong ngành." + }, "google": { "description": "Dòng Gemini của Google là mô hình AI tiên tiến và đa năng nhất của họ, được phát triển bởi Google DeepMind, được thiết kế cho đa phương thức, hỗ trợ hiểu và xử lý liền mạch văn bản, mã, hình ảnh, âm thanh và video. Phù hợp cho nhiều môi trường từ trung tâm dữ liệu đến thiết bị di động, nâng cao đáng kể hiệu quả và tính ứng dụng của mô hình AI." }, diff --git a/locales/zh-CN/chat.json b/locales/zh-CN/chat.json index 1381ddf29319..09768a24fb4f 100644 --- a/locales/zh-CN/chat.json +++ b/locales/zh-CN/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "你好,我是 **{{name}}**,你可以立即与我开始对话,也可以前往 [助手设置]({{url}}) 完善我的信息。", "agentDefaultMessageWithSystemRole": "你好,我是 **{{name}}**,{{systemRole}},让我们开始对话吧!", "agentDefaultMessageWithoutEdit": "你好,我是 **{{name}}**,让我们开始对话吧!", - "agentsAndConversations": "助手与会话", + "agents": "助手", "artifact": { "generating": "生成中", "thinking": "思考中", @@ -81,7 +81,7 @@ }, "regenerate": "重新生成", "roleAndArchive": "角色与记录", - "searchAgentPlaceholder": "搜索助手和对话...", + "searchAgentPlaceholder": "搜索助手...", "sendPlaceholder": "输入聊天内容...", "sessionGroup": { "config": "分组管理", diff --git a/locales/zh-CN/models.json b/locales/zh-CN/models.json index d77bdcf9e255..a6f862c8cf50 100644 --- a/locales/zh-CN/models.json +++ b/locales/zh-CN/models.json @@ -18,7 +18,7 @@ "description": "360GPT2 Pro 是 360 公司推出的高级自然语言处理模型,具备卓越的文本生成和理解能力,尤其在生成与创作领域表现出色,能够处理复杂的语言转换和角色演绎任务。" }, "4.0Ultra": { - "description": "Spark4.0 Ultra 是星火大模型系列中最为强大的版本,在升级联网搜索链路同时,提升对文本内容的理解和总结能力。它是用于提升办公生产力和准确响应需求的全方位解决方案,是引领行业的智能产品。" + "description": "Spark Ultra 是星火大模型系列中最为强大的版本,在升级联网搜索链路同时,提升对文本内容的理解和总结能力。它是用于提升办公生产力和准确响应需求的全方位解决方案,是引领行业的智能产品。" }, "Baichuan2-Turbo": { "description": "采用搜索增强技术实现大模型与领域知识、全网知识的全面链接。支持PDF、Word等多种文档上传及网址输入,信息获取及时、全面,输出结果准确、专业。" @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) 是一种创新模型,适合多领域应用和复杂任务。" }, + "Max-32k": { + "description": "Spark Max 32K 配置了大上下文处理能力,更强的上下文理解和逻辑推理能力,支持32K tokens的文本输入,适用于长文档阅读、私有知识问答等场景" + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度灵活的多模型合并,旨在提供卓越的创造性体验。" }, @@ -66,7 +69,7 @@ "description": "A 7B parameters model, proves better quality than Phi-3-mini, with a focus on high-quality, reasoning-dense data." }, "Pro-128k": { - "description": "Spark Pro-128K 配置了特大上下文处理能力,能够处理多达128K的上下文信息,特别适合需通篇分析和长期逻辑关联处理的长文内容,可在复杂文本沟通中提供流畅一致的逻辑与多样的引用支持。" + "description": "Spark Pro 128K 配置了特大上下文处理能力,能够处理多达128K的上下文信息,特别适合需通篇分析和长期逻辑关联处理的长文内容,可在复杂文本沟通中提供流畅一致的逻辑与多样的引用支持。" }, "Qwen/Qwen1.5-110B-Chat": { "description": "Qwen 1.5 Chat (110B) 是一款高效能的对话模型,支持复杂对话场景。" @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 是一款高效的多模态模型,支持广泛应用的扩展。" }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 是一款高效的多模态模型,支持广泛应用的扩展。" + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 专为处理大规模任务场景设计,提供无与伦比的处理速度。" }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 是最新的实验性模型,在文本和多模态用例中都有显著的性能提升。" + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 提供了优化后的多模态处理能力,适用多种复杂任务场景。" }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 是可扩展的多模态AI解决方案,支持广泛的复杂任务。" }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 是最新的生产就绪模型,提供更高质量的输出,特别在数学、长上下文和视觉任务方面有显著提升。" + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 提供出色的多模态处理能力,为应用开发带来更大灵活性。" }, @@ -372,7 +384,7 @@ "description": "Spark Pro 是一款为专业领域优化的高性能大语言模型,专注数学、编程、医疗、教育等多个领域,并支持联网搜索及内置天气、日期等插件。其优化后模型在复杂知识问答、语言理解及高层次文本创作中展现出色表现和高效性能,是适合专业应用场景的理想选择。" }, "generalv3.5": { - "description": "Spark3.5 Max 为功能最为全面的版本,支持联网搜索及众多内置插件。其全面优化的核心能力以及系统角色设定和函数调用功能,使其在各种复杂应用场景中的表现极为优异和出色。" + "description": "Spark Max 为功能最为全面的版本,支持联网搜索及众多内置插件。其全面优化的核心能力以及系统角色设定和函数调用功能,使其在各种复杂应用场景中的表现极为优异和出色。" }, "glm-4": { "description": "GLM-4 是发布于2024年1月的旧旗舰版本,目前已被更强的 GLM-4-0520 取代。" @@ -869,7 +881,10 @@ "description": "支持大规模上下文交互,适合复杂对话场景。" }, "taichu_llm": { - "description": "紫东太初语言大模型具备超强语言理解能力以及文本创作、知识问答、代码编程、数学计算、逻辑推理、情感分析、文本摘要等能力。创新性地将大数据预训练与多源丰富知识相结合,通过持续打磨算法技术,并不断吸收海量文本数据中词汇、结构、语法、语义等方面的新知识,实现模型效果不断进化。为用户提供更加便捷的信息和服务以及更为智能化的体验。" + "description": "Taichu 2.0 基于海量高质数据训练,具有更强的文本理解、内容创作、对话问答等能力" + }, + "taichu_vqa": { + "description": "Taichu 2.0V 融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出" }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) 通过高效的策略和模型架构,提供增强的计算能力。" diff --git a/locales/zh-TW/chat.json b/locales/zh-TW/chat.json index 1825a870a54b..b6fb80301a8b 100644 --- a/locales/zh-TW/chat.json +++ b/locales/zh-TW/chat.json @@ -5,7 +5,7 @@ "agentDefaultMessage": "你好,我是 **{{name}}**,你可以立即與我開始對話,也可以前往 [助手設定]({{url}}) 完善我的資訊。", "agentDefaultMessageWithSystemRole": "你好,我是 **{{name}}**,{{systemRole}},讓我們開始對話吧!", "agentDefaultMessageWithoutEdit": "你好,我是 **{{name}}**,讓我們開始對話吧!", - "agentsAndConversations": "助理與對話", + "agents": "助手", "artifact": { "generating": "生成中", "thinking": "思考中", @@ -81,7 +81,7 @@ }, "regenerate": "重新生成", "roleAndArchive": "角色與記錄", - "searchAgentPlaceholder": "搜索助手和對話...", + "searchAgentPlaceholder": "搜尋助手...", "sendPlaceholder": "輸入聊天內容...", "sessionGroup": { "config": "分組管理", diff --git a/locales/zh-TW/models.json b/locales/zh-TW/models.json index 148991db9479..7262251e1ed8 100644 --- a/locales/zh-TW/models.json +++ b/locales/zh-TW/models.json @@ -35,6 +35,9 @@ "Gryphe/MythoMax-L2-13b": { "description": "MythoMax-L2 (13B) 是一種創新模型,適合多領域應用和複雜任務。" }, + "Max-32k": { + "description": "Spark Max 32K 配備了更強大的上下文處理能力,具備更佳的上下文理解和邏輯推理能力,支持32K tokens的文本輸入,適用於長文檔閱讀、私有知識問答等場景" + }, "Nous-Hermes-2-Mixtral-8x7B-DPO": { "description": "Hermes 2 Mixtral 8x7B DPO 是一款高度靈活的多模型合併,旨在提供卓越的創造性體驗。" }, @@ -329,9 +332,15 @@ "gemini-1.5-flash-001": { "description": "Gemini 1.5 Flash 001 是一款高效的多模態模型,支持廣泛應用的擴展。" }, + "gemini-1.5-flash-002": { + "description": "Gemini 1.5 Flash 002 是一款高效的多模態模型,支持廣泛應用的擴展。" + }, "gemini-1.5-flash-8b-exp-0827": { "description": "Gemini 1.5 Flash 8B 0827 專為處理大規模任務場景設計,提供無與倫比的處理速度。" }, + "gemini-1.5-flash-8b-exp-0924": { + "description": "Gemini 1.5 Flash 8B 0924 是最新的實驗性模型,在文本和多模態用例中都有顯著的性能提升。" + }, "gemini-1.5-flash-exp-0827": { "description": "Gemini 1.5 Flash 0827 提供了優化後的多模態處理能力,適用多種複雜任務場景。" }, @@ -341,6 +350,9 @@ "gemini-1.5-pro-001": { "description": "Gemini 1.5 Pro 001 是可擴展的多模態 AI 解決方案,支持廣泛的複雜任務。" }, + "gemini-1.5-pro-002": { + "description": "Gemini 1.5 Pro 002 是最新的生產就緒模型,提供更高品質的輸出,特別在數學、長上下文和視覺任務方面有顯著提升。" + }, "gemini-1.5-pro-exp-0801": { "description": "Gemini 1.5 Pro 0801 提供出色的多模態處理能力,為應用開發帶來更大靈活性。" }, @@ -871,6 +883,9 @@ "taichu_llm": { "description": "紫東太初語言大模型具備超強語言理解能力以及文本創作、知識問答、代碼編程、數學計算、邏輯推理、情感分析、文本摘要等能力。創新性地將大數據預訓練與多源豐富知識相結合,通過持續打磨算法技術,並不斷吸收海量文本數據中詞彙、結構、語法、語義等方面的新知識,實現模型效果不斷進化。為用戶提供更加便捷的信息和服務以及更為智能化的體驗。" }, + "taichu_vqa": { + "description": "Taichu 2.0V 融合了圖像理解、知識遷移、邏輯歸因等能力,在圖文問答領域表現突出。" + }, "togethercomputer/StripedHyena-Nous-7B": { "description": "StripedHyena Nous (7B) 通過高效的策略和模型架構,提供增強的計算能力。" }, diff --git a/package.json b/package.json index dfb9621f1b4e..750039166ab2 100644 --- a/package.json +++ b/package.json @@ -1,6 +1,6 @@ { "name": "@lobehub/chat", - "version": "1.19.22", + "version": "1.19.31", "description": "Lobe Chat - an open-source, high-performance chatbot framework that supports speech synthesis, multimodal, and extensible Function Call plugin system. Supports one-click free deployment of your private ChatGPT/LLM web application.", "keywords": [ "framework", diff --git a/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/index.tsx b/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/index.tsx index 144bb14ca1d5..f473862fb148 100644 --- a/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/index.tsx +++ b/src/app/(main)/chat/(workspace)/@portal/Artifacts/Body/index.tsx @@ -4,6 +4,7 @@ import { Flexbox } from 'react-layout-kit'; import { useChatStore } from '@/store/chat'; import { chatPortalSelectors, chatSelectors } from '@/store/chat/selectors'; +import { ArtifactType } from '@/types/artifact'; import Renderer from './Renderer'; @@ -14,7 +15,7 @@ const ArtifactsUI = memo(() => { isMessageGenerating, artifactType, artifactContent, - + artifactCodeLanguage, isArtifactTagClosed, ] = useChatStore((s) => { const messageId = chatPortalSelectors.artifactMessageId(s) || ''; @@ -25,6 +26,7 @@ const ArtifactsUI = memo(() => { chatSelectors.isMessageGenerating(messageId)(s), chatPortalSelectors.artifactType(s), chatPortalSelectors.artifactCode(messageId)(s), + chatPortalSelectors.artifactCodeLanguage(s), chatPortalSelectors.isArtifactTagClosed(messageId)(s), ]; }); @@ -39,11 +41,15 @@ const ArtifactsUI = memo(() => { const language = useMemo(() => { switch (artifactType) { - case 'application/lobe.artifacts.react': { + case ArtifactType.React: { return 'tsx'; } - case 'python': { + case ArtifactType.Code: { + return artifactCodeLanguage; + } + + case ArtifactType.Python: { return 'python'; } @@ -51,11 +57,15 @@ const ArtifactsUI = memo(() => { return 'html'; } } - }, [artifactType]); + }, [artifactType, artifactCodeLanguage]); // make sure the message and id is valid if (!messageId) return; + // show code when the artifact is not closed or the display mode is code or the artifact type is code + const showCode = + !isArtifactTagClosed || displayMode === 'code' || artifactType === ArtifactType.Code; + return ( { paddingInline={12} style={{ overflow: 'hidden' }} > - {!isArtifactTagClosed || displayMode === 'code' ? ( - + {showCode ? ( + {artifactContent} ) : ( diff --git a/src/app/(main)/chat/(workspace)/@portal/Artifacts/Header.tsx b/src/app/(main)/chat/(workspace)/@portal/Artifacts/Header.tsx index 17221e828b49..78d4efcb6799 100644 --- a/src/app/(main)/chat/(workspace)/@portal/Artifacts/Header.tsx +++ b/src/app/(main)/chat/(workspace)/@portal/Artifacts/Header.tsx @@ -8,20 +8,26 @@ import { Flexbox } from 'react-layout-kit'; import { useChatStore } from '@/store/chat'; import { chatPortalSelectors } from '@/store/chat/selectors'; import { oneLineEllipsis } from '@/styles'; +import { ArtifactType } from '@/types/artifact'; const Header = () => { const { t } = useTranslation('portal'); - const [displayMode, artifactTitle, isArtifactTagClosed, closeArtifact] = useChatStore((s) => { - const messageId = chatPortalSelectors.artifactMessageId(s) || ''; + const [displayMode, artifactType, artifactTitle, isArtifactTagClosed, closeArtifact] = + useChatStore((s) => { + const messageId = chatPortalSelectors.artifactMessageId(s) || ''; - return [ - s.portalArtifactDisplayMode, - chatPortalSelectors.artifactTitle(s), - chatPortalSelectors.isArtifactTagClosed(messageId)(s), - s.closeArtifact, - ]; - }); + return [ + s.portalArtifactDisplayMode, + chatPortalSelectors.artifactType(s), + chatPortalSelectors.artifactTitle(s), + chatPortalSelectors.isArtifactTagClosed(messageId)(s), + s.closeArtifact, + ]; + }); + + // show switch only when artifact is closed and the type is not code + const showSwitch = isArtifactTagClosed && artifactType !== ArtifactType.Code; return ( @@ -44,7 +50,7 @@ const Header = () => { }, }} > - {isArtifactTagClosed && ( + {showSwitch && ( { useChatStore.setState({ portalArtifactDisplayMode: value }); diff --git a/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx b/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx index a851dc4c4347..8c7a81a79a80 100644 --- a/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx +++ b/src/app/(main)/chat/(workspace)/_layout/Desktop/ChatHeader/Main.tsx @@ -51,7 +51,7 @@ const Main = memo(() => { { { updateSystemStatus({ @@ -60,7 +60,7 @@ const Main = memo(() => { }); }} size={DESKTOP_HEADER_ICON_SIZE} - title={t('agentsAndConversations')} + title={t('agents')} /> } { BaichuanProviderCard, MinimaxProviderCard, Ai360ProviderCard, - SiliconCloudProviderCard, TaichuProviderCard, + SiliconCloudProviderCard, ], [AzureProvider, OllamaProvider, OpenAIProvider, BedrockProvider, GithubProvider], ); diff --git a/src/components/GalleyGrid/index.tsx b/src/components/GalleyGrid/index.tsx index dd0cd3eac3d6..f8c9d03ff1cb 100644 --- a/src/components/GalleyGrid/index.tsx +++ b/src/components/GalleyGrid/index.tsx @@ -21,7 +21,7 @@ const GalleyGrid = memo(({ items, renderItem: Render }) => { }; } - const firstCol = items.length % 3 === 0 ? 3 : items.length % 3; + const firstCol = items.length > 4 ? 3 : items.length; return { firstRow: items.slice(0, firstCol), @@ -29,13 +29,16 @@ const GalleyGrid = memo(({ items, renderItem: Render }) => { }; }, [items]); - const { gap, max } = useMemo( - () => ({ + const { gap, max } = useMemo(() => { + let scale = firstRow.length * (firstRow.length / items.length); + + scale = scale < 1 ? 1 : scale; + + return { gap: mobile ? 4 : 6, - max: (mobile ? MAX_SIZE_MOBILE : MAX_SIZE_DESKTOP) * firstRow.length, - }), - [mobile], - ); + max: (mobile ? MAX_SIZE_MOBILE : MAX_SIZE_DESKTOP) * scale, + }; + }, [mobile, items]); return ( @@ -45,7 +48,7 @@ const GalleyGrid = memo(({ items, renderItem: Render }) => { ))} {lastRow.length > 0 && ( - 2 ? 3 : lastRow.length} gap={gap} max={max}> + {lastRow.map((i, index) => ( ))} diff --git a/src/config/modelProviders/ai360.ts b/src/config/modelProviders/ai360.ts index 7d40b45b1fa5..2f2f5fab7e78 100644 --- a/src/config/modelProviders/ai360.ts +++ b/src/config/modelProviders/ai360.ts @@ -8,7 +8,6 @@ const Ai360: ModelProviderCard = { '360GPT2 Pro 是 360 公司推出的高级自然语言处理模型,具备卓越的文本生成和理解能力,尤其在生成与创作领域表现出色,能够处理复杂的语言转换和角色演绎任务。', displayName: '360GPT2 Pro', enabled: true, - functionCall: false, id: '360gpt2-pro', maxOutput: 7000, pricing: { @@ -22,6 +21,8 @@ const Ai360: ModelProviderCard = { description: '360GPT Pro 作为 360 AI 模型系列的重要成员,以高效的文本处理能力满足多样化的自然语言应用场景,支持长文本理解和多轮对话等功能。', displayName: '360GPT Pro', + enabled: true, + functionCall: true, id: '360gpt-pro', maxOutput: 7000, pricing: { diff --git a/src/config/modelProviders/google.ts b/src/config/modelProviders/google.ts index babb9e83cacc..62550c12c617 100644 --- a/src/config/modelProviders/google.ts +++ b/src/config/modelProviders/google.ts @@ -22,7 +22,6 @@ const Google: ModelProviderCard = { { description: 'Gemini 1.5 Flash 0827 提供了优化后的多模态处理能力,适用多种复杂任务场景。', displayName: 'Gemini 1.5 Flash 0827', - enabled: true, functionCall: true, id: 'gemini-1.5-flash-exp-0827', maxOutput: 8192, @@ -45,12 +44,49 @@ const Google: ModelProviderCard = { tokens: 1_048_576 + 8192, vision: true, }, + { + description: + 'Gemini 1.5 Flash 8B 0924 是最新的实验性模型,在文本和多模态用例中都有显著的性能提升。', + displayName: 'Gemini 1.5 Flash 8B 0924', + enabled: true, + functionCall: true, + id: 'gemini-1.5-flash-8b-exp-0924', + maxOutput: 8192, + pricing: { + cachedInput: 0.018_75, + input: 0.075, + output: 0.3, + }, + releasedAt: '2024-09-24', + tokens: 1_048_576 + 8192, + vision: true, + }, { description: 'Gemini 1.5 Flash 001 是一款高效的多模态模型,支持广泛应用的扩展。', displayName: 'Gemini 1.5 Flash 001', functionCall: true, id: 'gemini-1.5-flash-001', maxOutput: 8192, + pricing: { + cachedInput: 0.018_75, + input: 0.075, + output: 0.3, + }, + tokens: 1_048_576 + 8192, + vision: true, + }, + { + description: 'Gemini 1.5 Flash 002 是一款高效的多模态模型,支持广泛应用的扩展。', + displayName: 'Gemini 1.5 Flash 002', + functionCall: true, + id: 'gemini-1.5-flash-002', + maxOutput: 8192, + pricing: { + cachedInput: 0.018_75, + input: 0.075, + output: 0.3, + }, + releasedAt: '2024-09-25', tokens: 1_048_576 + 8192, vision: true, }, @@ -74,7 +110,6 @@ const Google: ModelProviderCard = { { description: 'Gemini 1.5 Pro 0827 结合最新优化技术,带来更高效的多模态数据处理能力。', displayName: 'Gemini 1.5 Pro 0827', - enabled: true, functionCall: true, id: 'gemini-1.5-pro-exp-0827', maxOutput: 8192, @@ -117,6 +152,23 @@ const Google: ModelProviderCard = { tokens: 2_097_152 + 8192, vision: true, }, + { + description: + 'Gemini 1.5 Pro 002 是最新的生产就绪模型,提供更高质量的输出,特别在数学、长上下文和视觉任务方面有显著提升。', + displayName: 'Gemini 1.5 Pro 002', + enabled: true, + functionCall: true, + id: 'gemini-1.5-pro-002', + maxOutput: 8192, + pricing: { + cachedInput: 0.315, + input: 1.25, + output: 2.5, + }, + releasedAt: '2024-09-24', + tokens: 2_097_152 + 8192, + vision: true, + }, { description: 'Gemini 1.0 Pro 是Google的高性能AI模型,专为广泛任务扩展而设计。', displayName: 'Gemini 1.0 Pro', diff --git a/src/config/modelProviders/index.ts b/src/config/modelProviders/index.ts index 33bfd8d62b03..27094172b378 100644 --- a/src/config/modelProviders/index.ts +++ b/src/config/modelProviders/index.ts @@ -86,8 +86,8 @@ export const DEFAULT_MODEL_PROVIDER_LIST = [ BaichuanProvider, MinimaxProvider, Ai360Provider, - SiliconCloudProvider, TaichuProvider, + SiliconCloudProvider, ]; export const filterEnabledModels = (provider: ModelProviderCard) => { diff --git a/src/config/modelProviders/spark.ts b/src/config/modelProviders/spark.ts index 4eaba9d8d658..07b3f6bc11de 100644 --- a/src/config/modelProviders/spark.ts +++ b/src/config/modelProviders/spark.ts @@ -26,18 +26,18 @@ const Spark: ModelProviderCard = { }, { description: - 'Spark Pro-128K 配置了特大上下文处理能力,能够处理多达128K的上下文信息,特别适合需通篇分析和长期逻辑关联处理的长文内容,可在复杂文本沟通中提供流畅一致的逻辑与多样的引用支持。', - displayName: 'Spark Pro-128K', + 'Spark Pro 128K 配置了特大上下文处理能力,能够处理多达128K的上下文信息,特别适合需通篇分析和长期逻辑关联处理的长文内容,可在复杂文本沟通中提供流畅一致的逻辑与多样的引用支持。', + displayName: 'Spark Pro 128K', enabled: true, functionCall: false, id: 'Pro-128k', maxOutput: 4096, - tokens: 128_000, + tokens: 131_072, }, { description: - 'Spark3.5 Max 为功能最为全面的版本,支持联网搜索及众多内置插件。其全面优化的核心能力以及系统角色设定和函数调用功能,使其在各种复杂应用场景中的表现极为优异和出色。', - displayName: 'Spark3.5 Max', + 'Spark Max 为功能最为全面的版本,支持联网搜索及众多内置插件。其全面优化的核心能力以及系统角色设定和函数调用功能,使其在各种复杂应用场景中的表现极为优异和出色。', + displayName: 'Spark 3.5 Max', enabled: true, functionCall: false, id: 'generalv3.5', @@ -46,8 +46,18 @@ const Spark: ModelProviderCard = { }, { description: - 'Spark4.0 Ultra 是星火大模型系列中最为强大的版本,在升级联网搜索链路同时,提升对文本内容的理解和总结能力。它是用于提升办公生产力和准确响应需求的全方位解决方案,是引领行业的智能产品。', - displayName: 'Spark4.0 Ultra', + 'Spark Max 32K 配置了大上下文处理能力,更强的上下文理解和逻辑推理能力,支持32K tokens的文本输入,适用于长文档阅读、私有知识问答等场景', + displayName: 'Spark 3.5 Max 32K', + enabled: true, + functionCall: false, + id: 'Max-32k', + maxOutput: 8192, + tokens: 32_768, + }, + { + description: + 'Spark Ultra 是星火大模型系列中最为强大的版本,在升级联网搜索链路同时,提升对文本内容的理解和总结能力。它是用于提升办公生产力和准确响应需求的全方位解决方案,是引领行业的智能产品。', + displayName: 'Spark 4.0 Ultra', enabled: true, functionCall: false, id: '4.0Ultra', diff --git a/src/config/modelProviders/stepfun.ts b/src/config/modelProviders/stepfun.ts index b37da210f792..1e4a022484cb 100644 --- a/src/config/modelProviders/stepfun.ts +++ b/src/config/modelProviders/stepfun.ts @@ -8,12 +8,14 @@ const Stepfun: ModelProviderCard = { description: '支持大规模上下文交互,适合复杂对话场景。', displayName: 'Step 2 16K', enabled: true, + functionCall: true, id: 'step-2-16k', tokens: 16_000, }, { description: '具备超长上下文处理能力,尤其适合长文档分析。', displayName: 'Step 1 256K', + functionCall: true, id: 'step-1-256k', tokens: 256_000, }, @@ -21,6 +23,7 @@ const Stepfun: ModelProviderCard = { description: '平衡性能与成本,适合一般场景。', displayName: 'Step 1 128K', enabled: true, + functionCall: true, id: 'step-1-128k', tokens: 128_000, }, @@ -28,6 +31,7 @@ const Stepfun: ModelProviderCard = { description: '支持中等长度的对话,适用于多种应用场景。', displayName: 'Step 1 32K', enabled: true, + functionCall: true, id: 'step-1-32k', tokens: 32_000, }, @@ -35,6 +39,7 @@ const Stepfun: ModelProviderCard = { description: '小型模型,适合轻量级任务。', displayName: 'Step 1 8K', enabled: true, + functionCall: true, id: 'step-1-8k', tokens: 8000, }, @@ -42,6 +47,7 @@ const Stepfun: ModelProviderCard = { description: '高速模型,适合实时对话。', displayName: 'Step 1 Flash', enabled: true, + functionCall: true, id: 'step-1-flash', tokens: 8000, }, @@ -49,6 +55,7 @@ const Stepfun: ModelProviderCard = { description: '支持视觉输入,增强多模态交互体验。', displayName: 'Step 1V 32K', enabled: true, + functionCall: true, id: 'step-1v-32k', tokens: 32_000, vision: true, @@ -57,6 +64,7 @@ const Stepfun: ModelProviderCard = { description: '小型视觉模型,适合基本的图文任务。', displayName: 'Step 1V 8K', enabled: true, + functionCall: true, id: 'step-1v-8k', tokens: 8000, vision: true, diff --git a/src/config/modelProviders/taichu.ts b/src/config/modelProviders/taichu.ts index b5bde15c0a06..f51eb1b320bd 100644 --- a/src/config/modelProviders/taichu.ts +++ b/src/config/modelProviders/taichu.ts @@ -5,13 +5,21 @@ const Taichu: ModelProviderCard = { chatModels: [ { description: - '紫东太初语言大模型具备超强语言理解能力以及文本创作、知识问答、代码编程、数学计算、逻辑推理、情感分析、文本摘要等能力。创新性地将大数据预训练与多源丰富知识相结合,通过持续打磨算法技术,并不断吸收海量文本数据中词汇、结构、语法、语义等方面的新知识,实现模型效果不断进化。为用户提供更加便捷的信息和服务以及更为智能化的体验。', - displayName: 'Taichu-2.0', + 'Taichu 2.0 基于海量高质数据训练,具有更强的文本理解、内容创作、对话问答等能力', + displayName: 'Taichu 2.0', enabled: true, functionCall: false, id: 'taichu_llm', tokens: 32_768, }, + { + description: + 'Taichu 2.0V 融合了图像理解、知识迁移、逻辑归因等能力,在图文问答领域表现突出', + displayName: 'Taichu 2.0V', + id: 'taichu_vqa', + tokens: 4096, + vision: true, + }, ], checkModel: 'taichu_llm', description: diff --git a/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx b/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx index 07f4f237378b..e26135d65c1b 100644 --- a/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx +++ b/src/features/Conversation/components/MarkdownElements/LobeArtifact/Render/index.tsx @@ -48,11 +48,12 @@ const useStyles = createStyles(({ css, token, isDarkMode }) => ({ interface ArtifactProps extends MarkdownElementProps { identifier: string; + language?: string; title: string; type: string; } -const Render = memo(({ identifier, title, type, children, id }) => { +const Render = memo(({ identifier, title, type, language, children, id }) => { const { t } = useTranslation('chat'); const { styles, cx } = useStyles(); @@ -71,14 +72,14 @@ const Render = memo(({ identifier, title, type, children, id }) = }); const openArtifactUI = () => { - openArtifact({ id, identifier, title, type }); + openArtifact({ id, identifier, language, title, type }); }; useEffect(() => { if (!hasChildren || !isGenerating) return; openArtifactUI(); - }, [isGenerating, hasChildren, str, identifier, title, type, id]); + }, [isGenerating, hasChildren, str, identifier, title, type, id, language]); return (

diff --git a/src/layout/GlobalProvider/StoreInitialization.tsx b/src/layout/GlobalProvider/StoreInitialization.tsx index 462b4b483a25..24345ef8ec8b 100644 --- a/src/layout/GlobalProvider/StoreInitialization.tsx +++ b/src/layout/GlobalProvider/StoreInitialization.tsx @@ -20,12 +20,14 @@ const StoreInitialization = memo(() => { useTranslation('error'); const router = useRouter(); - const [isLogin, isSignedIn, useInitUserState, importUrlShareSettings] = useUserStore((s) => [ - authSelectors.isLogin(s), - s.isSignedIn, - s.useInitUserState, - s.importUrlShareSettings, - ]); + const [isLogin, isSignedIn, useInitUserState, importUrlShareSettings, isUserStateInit] = + useUserStore((s) => [ + authSelectors.isLogin(s), + s.isSignedIn, + s.useInitUserState, + s.importUrlShareSettings, + s.isUserStateInit, + ]); const { serverConfig } = useServerConfigStore(); @@ -74,8 +76,10 @@ const StoreInitialization = memo(() => { // Import settings from the url const searchParam = useSearchParams().get(LOBE_URL_IMPORT_NAME); useEffect(() => { - importUrlShareSettings(searchParam); - }, [searchParam]); + // Why use `usUserStateInit`, + // see: https://github.com/lobehub/lobe-chat/pull/4072 + if (searchParam && isUserStateInit) importUrlShareSettings(searchParam); + }, [searchParam, isUserStateInit]); useEffect(() => { if (mobile) { diff --git a/src/libs/agent-runtime/ai360/index.ts b/src/libs/agent-runtime/ai360/index.ts index 86c199154a16..c931293363ae 100644 --- a/src/libs/agent-runtime/ai360/index.ts +++ b/src/libs/agent-runtime/ai360/index.ts @@ -3,6 +3,14 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory'; export const LobeAi360AI = LobeOpenAICompatibleFactory({ baseURL: 'https://ai.360.cn/v1', + chatCompletion: { + handlePayload: (payload) => { + return { + ...payload, + stream: !payload.tools, + } as any; + }, + }, debug: { chatCompletion: () => process.env.DEBUG_AI360_CHAT_COMPLETION === '1', }, diff --git a/src/libs/agent-runtime/google/index.test.ts b/src/libs/agent-runtime/google/index.test.ts index 8b54fb8aabbe..469286a41eb6 100644 --- a/src/libs/agent-runtime/google/index.test.ts +++ b/src/libs/agent-runtime/google/index.test.ts @@ -304,6 +304,30 @@ describe('LobeGoogleAI', () => { describe('private method', () => { describe('convertContentToGooglePart', () => { + it('should handle text type messages', async () => { + const result = await instance['convertContentToGooglePart']({ + type: 'text', + text: 'Hello', + }); + expect(result).toEqual({ text: 'Hello' }); + }); + + it('should handle base64 type images', async () => { + const base64Image = + 'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg=='; + const result = await instance['convertContentToGooglePart']({ + type: 'image_url', + image_url: { url: base64Image }, + }); + + expect(result).toEqual({ + inlineData: { + data: 'iVBORw0KGgoAAAANSUhEUgAAAAUAAAAFCAYAAACNbyblAAAAHElEQVQI12P4//8/w38GIAXDIBKE0DHxgljNBAAO9TXL0Y4OHwAAAABJRU5ErkJggg==', + mimeType: 'image/png', + }, + }); + }); + it('should handle URL type images', async () => { const imageUrl = 'http://example.com/image.png'; const mockBase64 = 'mockBase64Data'; @@ -357,7 +381,7 @@ describe('LobeGoogleAI', () => { { content: 'Hi', role: 'assistant' }, ]; - const contents = await instance['buildGoogleMessages'](messages, 'gemini-pro'); + const contents = await instance['buildGoogleMessages'](messages, 'gemini-1.0'); expect(contents).toHaveLength(3); expect(contents).toEqual([ @@ -373,7 +397,7 @@ describe('LobeGoogleAI', () => { { content: 'Who are you', role: 'user' }, ]; - const contents = await instance['buildGoogleMessages'](messages, 'gemini-pro'); + const contents = await instance['buildGoogleMessages'](messages, 'gemini-1.0'); expect(contents).toHaveLength(3); expect(contents).toEqual([ @@ -487,9 +511,6 @@ describe('LobeGoogleAI', () => { }); }); - // 类似地添加 array/string/number/boolean 类型schema的测试用例 - // ... - it('should correctly convert nested schema', () => { const schema: JSONSchema7 = { type: 'object', @@ -523,6 +544,36 @@ describe('LobeGoogleAI', () => { }, }); }); + + it('should correctly convert array schema', () => { + const schema: JSONSchema7 = { + type: 'array', + items: { type: 'string' }, + }; + const converted = instance['convertSchemaObject'](schema); + expect(converted).toEqual({ + type: FunctionDeclarationSchemaType.ARRAY, + items: { type: FunctionDeclarationSchemaType.STRING }, + }); + }); + + it('should correctly convert string schema', () => { + const schema: JSONSchema7 = { type: 'string' }; + const converted = instance['convertSchemaObject'](schema); + expect(converted).toEqual({ type: FunctionDeclarationSchemaType.STRING }); + }); + + it('should correctly convert number schema', () => { + const schema: JSONSchema7 = { type: 'number' }; + const converted = instance['convertSchemaObject'](schema); + expect(converted).toEqual({ type: FunctionDeclarationSchemaType.NUMBER }); + }); + + it('should correctly convert boolean schema', () => { + const schema: JSONSchema7 = { type: 'boolean' }; + const converted = instance['convertSchemaObject'](schema); + expect(converted).toEqual({ type: FunctionDeclarationSchemaType.BOOLEAN }); + }); }); describe('convertOAIMessagesToGoogleMessage', () => { @@ -592,6 +643,49 @@ describe('LobeGoogleAI', () => { ], }); }); + + it('should correctly convert function call message', async () => { + const message = { + role: 'assistant', + tool_calls: [ + { + id: 'call_1', + function: { + name: 'get_current_weather', + arguments: JSON.stringify({ location: 'London', unit: 'celsius' }), + }, + type: 'function', + }, + ], + } as OpenAIChatMessage; + + const converted = await instance['convertOAIMessagesToGoogleMessage'](message); + expect(converted).toEqual({ + role: 'function', + parts: [ + { + functionCall: { + name: 'get_current_weather', + args: { location: 'London', unit: 'celsius' }, + }, + }, + ], + }); + }); + + it('should correctly handle empty content', async () => { + const message: OpenAIChatMessage = { + role: 'user', + content: '' as any, // explicitly set as empty string + }; + + const converted = await instance['convertOAIMessagesToGoogleMessage'](message); + + expect(converted).toEqual({ + role: 'user', + parts: [{ text: '' }], + }); + }); }); }); }); diff --git a/src/libs/agent-runtime/google/index.ts b/src/libs/agent-runtime/google/index.ts index 1c6130aa3199..ecc80c9d2855 100644 --- a/src/libs/agent-runtime/google/index.ts +++ b/src/libs/agent-runtime/google/index.ts @@ -1,5 +1,6 @@ import { Content, + FunctionCallPart, FunctionDeclaration, FunctionDeclarationSchemaProperty, FunctionDeclarationSchemaType, @@ -11,6 +12,7 @@ import { JSONSchema7 } from 'json-schema'; import { transform } from 'lodash-es'; import { imageUrlToBase64 } from '@/utils/imageToBase64'; +import { safeParseJSON } from '@/utils/safeParseJSON'; import { LobeRuntimeAI } from '../BaseAI'; import { AgentRuntimeErrorType, ILobeAgentRuntimeErrorType } from '../error'; @@ -50,8 +52,9 @@ export class LobeGoogleAI implements LobeRuntimeAI { this.baseURL = baseURL; } - async chat(payload: ChatStreamPayload, options?: ChatCompetitionOptions) { + async chat(rawPayload: ChatStreamPayload, options?: ChatCompetitionOptions) { try { + const payload = this.buildPayload(rawPayload); const model = payload.model; const contents = await this.buildGoogleMessages(payload.messages, model); @@ -88,7 +91,11 @@ export class LobeGoogleAI implements LobeRuntimeAI { }, { apiVersion: 'v1beta', baseUrl: this.baseURL }, ) - .generateContentStream({ contents, tools: this.buildGoogleTools(payload.tools) }); + .generateContentStream({ + contents, + systemInstruction: payload.system as string, + tools: this.buildGoogleTools(payload.tools), + }); const googleStream = googleGenAIResultToStream(geminiStreamResult); const [prod, useForDebug] = googleStream.tee(); @@ -111,6 +118,16 @@ export class LobeGoogleAI implements LobeRuntimeAI { } } + private buildPayload(payload: ChatStreamPayload) { + const system_message = payload.messages.find((m) => m.role === 'system'); + const user_messages = payload.messages.filter((m) => m.role !== 'system'); + + return { + ...payload, + messages: user_messages, + system: system_message?.content, + }; + } private convertContentToGooglePart = async (content: UserMessageContentPart): Promise => { switch (content.type) { case 'text': { @@ -152,6 +169,17 @@ export class LobeGoogleAI implements LobeRuntimeAI { message: OpenAIChatMessage, ): Promise => { const content = message.content as string | UserMessageContentPart[]; + if (!!message.tool_calls) { + return { + parts: message.tool_calls.map((tool) => ({ + functionCall: { + args: safeParseJSON(tool.function.arguments)!, + name: tool.function.name, + }, + })), + role: 'function', + }; + } return { parts: @@ -168,44 +196,44 @@ export class LobeGoogleAI implements LobeRuntimeAI { messages: OpenAIChatMessage[], model: string, ): Promise => { - // if the model is gemini-1.5-pro-latest, we don't need any special handling - if (model === 'gemini-1.5-pro-latest') { - const pools = messages - .filter((message) => message.role !== 'function') - .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg)); + // if the model is gemini-1.0 we don't need to pair messages + if (model.startsWith('gemini-1.0')) { + const contents: Content[] = []; + let lastRole = 'model'; + + for (const message of messages) { + // current to filter function message + if (message.role === 'function') { + continue; + } + const googleMessage = await this.convertOAIMessagesToGoogleMessage(message); - return Promise.all(pools); - } + // if the last message is a model message and the current message is a model message + // then we need to add a user message to separate them + if (lastRole === googleMessage.role) { + contents.push({ parts: [{ text: '' }], role: lastRole === 'user' ? 'model' : 'user' }); + } - const contents: Content[] = []; - let lastRole = 'model'; + // add the current message to the contents + contents.push(googleMessage); - for (const message of messages) { - // current to filter function message - if (message.role === 'function') { - continue; + // update the last role + lastRole = googleMessage.role; } - const googleMessage = await this.convertOAIMessagesToGoogleMessage(message); - // if the last message is a model message and the current message is a model message - // then we need to add a user message to separate them - if (lastRole === googleMessage.role) { - contents.push({ parts: [{ text: '' }], role: lastRole === 'user' ? 'model' : 'user' }); + // if the last message is a user message, then we need to add a model message to separate them + if (lastRole === 'model') { + contents.push({ parts: [{ text: '' }], role: 'user' }); } - // add the current message to the contents - contents.push(googleMessage); - - // update the last role - lastRole = googleMessage.role; + return contents; } - // if the last message is a user message, then we need to add a model message to separate them - if (lastRole === 'model') { - contents.push({ parts: [{ text: '' }], role: 'user' }); - } + const pools = messages + .filter((message) => message.role !== 'function') + .map(async (msg) => await this.convertOAIMessagesToGoogleMessage(msg)); - return contents; + return Promise.all(pools); }; private parseErrorMessage(message: string): { diff --git a/src/libs/agent-runtime/stepfun/index.ts b/src/libs/agent-runtime/stepfun/index.ts index 02727361279e..4ae98b6fe3c7 100644 --- a/src/libs/agent-runtime/stepfun/index.ts +++ b/src/libs/agent-runtime/stepfun/index.ts @@ -3,6 +3,14 @@ import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory'; export const LobeStepfunAI = LobeOpenAICompatibleFactory({ baseURL: 'https://api.stepfun.com/v1', + chatCompletion: { + handlePayload: (payload) => { + return { + ...payload, + stream: !payload.tools, + } as any; + }, + }, debug: { chatCompletion: () => process.env.DEBUG_STEPFUN_CHAT_COMPLETION === '1', }, diff --git a/src/libs/unstructured/__tests__/index.test.ts b/src/libs/unstructured/__tests__/index.test.ts index f327430575cf..adc84354b2c4 100644 --- a/src/libs/unstructured/__tests__/index.test.ts +++ b/src/libs/unstructured/__tests__/index.test.ts @@ -120,8 +120,8 @@ describe('Unstructured', () => { expect(result.compositeElements).toHaveLength(3); expect(result.originElements).toHaveLength(5); - expect(result.compositeElements).toEqual(AutoWithChunkingOutput.compositeElements); - expect(result.originElements).toEqual(AutoWithChunkingOutput.originElements); + // expect(result.compositeElements).toEqual(AutoWithChunkingOutput.compositeElements); + // expect(result.originElements).toEqual(AutoWithChunkingOutput.originElements); }); it.skip('should error', async () => { diff --git a/src/locales/default/chat.ts b/src/locales/default/chat.ts index a68330b94099..d0b8e938fdd2 100644 --- a/src/locales/default/chat.ts +++ b/src/locales/default/chat.ts @@ -6,7 +6,7 @@ export default { '你好,我是 **{{name}}**,你可以立即与我开始对话,也可以前往 [助手设置]({{url}}) 完善我的信息。', agentDefaultMessageWithSystemRole: '你好,我是 **{{name}}**,{{systemRole}},让我们开始对话吧!', agentDefaultMessageWithoutEdit: '你好,我是 **{{name}}**,让我们开始对话吧!', - agentsAndConversations: '助手与会话', + agents: '助手', artifact: { generating: '生成中', thinking: '思考中', @@ -83,7 +83,7 @@ export default { }, regenerate: '重新生成', roleAndArchive: '角色与记录', - searchAgentPlaceholder: '搜索助手和对话...', + searchAgentPlaceholder: '搜索助手...', sendPlaceholder: '输入聊天内容...', sessionGroup: { config: '分组管理', diff --git a/src/store/chat/slices/portal/action.ts b/src/store/chat/slices/portal/action.ts index 428bfebcfa5b..cb3f9a222778 100644 --- a/src/store/chat/slices/portal/action.ts +++ b/src/store/chat/slices/portal/action.ts @@ -1,8 +1,9 @@ import { StateCreator } from 'zustand/vanilla'; import { ChatStore } from '@/store/chat/store'; +import { PortalArtifact } from '@/types/artifact'; -import { PortalArtifact, PortalFile } from './initialState'; +import { PortalFile } from './initialState'; export interface ChatPortalAction { closeArtifact: () => void; diff --git a/src/store/chat/slices/portal/initialState.ts b/src/store/chat/slices/portal/initialState.ts index c9b447af4be8..17ca0a2082d7 100644 --- a/src/store/chat/slices/portal/initialState.ts +++ b/src/store/chat/slices/portal/initialState.ts @@ -1,17 +1,11 @@ +import { PortalArtifact } from '@/types/artifact'; + export interface PortalFile { chunkId?: string; chunkText?: string; fileId: string; } -export interface PortalArtifact { - children?: string; - id: string; - identifier?: string; - title?: string; - type?: string; -} - export interface ChatPortalState { portalArtifact?: PortalArtifact; portalArtifactDisplayMode?: 'code' | 'preview'; diff --git a/src/store/chat/slices/portal/selectors.ts b/src/store/chat/slices/portal/selectors.ts index 61a439af0848..783fe6c89eda 100644 --- a/src/store/chat/slices/portal/selectors.ts +++ b/src/store/chat/slices/portal/selectors.ts @@ -24,6 +24,7 @@ const artifactTitle = (s: ChatStoreState) => s.portalArtifact?.title; const artifactIdentifier = (s: ChatStoreState) => s.portalArtifact?.identifier || ''; const artifactMessageId = (s: ChatStoreState) => s.portalArtifact?.id; const artifactType = (s: ChatStoreState) => s.portalArtifact?.type; +const artifactCodeLanguage = (s: ChatStoreState) => s.portalArtifact?.language; const artifactMessageContent = (id: string) => (s: ChatStoreState) => { const message = chatSelectors.getMessageById(id)(s); @@ -67,5 +68,6 @@ export const chatPortalSelectors = { artifactType, artifactCode, artifactMessageContent, + artifactCodeLanguage, isArtifactTagClosed, }; diff --git a/src/types/artifact.ts b/src/types/artifact.ts new file mode 100644 index 000000000000..b4a0037ca69c --- /dev/null +++ b/src/types/artifact.ts @@ -0,0 +1,15 @@ +export interface PortalArtifact { + children?: string; + id: string; + identifier?: string; + language?: string; + title?: string; + type?: string; +} + +export enum ArtifactType { + Code = 'application/lobe.artifacts.code', + Default = 'html', + Python = 'python', + React = 'application/lobe.artifacts.react', +}