From 1e3fc49a2b5df910f346fc058c37dc8fb925a0d2 Mon Sep 17 00:00:00 2001 From: smalltong02 Date: Tue, 2 Jan 2024 21:58:22 -0800 Subject: [PATCH] add new model tinyllama. add download model feature. --- WebUI/configs/webuiconfig.json | 10 + .../model_configuration/configuration.py | 2 +- __webgui_server__.py | 18 +- models/llm/TinyLlama-1.1B-Chat-v1.0/README.md | 61 ++++ models/llm/Yi-6B-Chat-4bits/README.md | 311 ++++++++++++++---- webui-startup.bat | 2 +- 6 files changed, 316 insertions(+), 88 deletions(-) create mode 100644 models/llm/TinyLlama-1.1B-Chat-v1.0/README.md diff --git a/WebUI/configs/webuiconfig.json b/WebUI/configs/webuiconfig.json index 46f21ed..8058b05 100644 --- a/WebUI/configs/webuiconfig.json +++ b/WebUI/configs/webuiconfig.json @@ -411,6 +411,16 @@ "LocalModel": { "LLM Model": { "3B Model": { + "TinyLlama-1.1B-Chat-v1.0": { + "path": "models/llm/TinyLlama-1.1B-Chat-v1.0", + "device": "auto", + "maxmemory": 20, + "cputhreads": 4, + "loadbits": 16, + "preset": "default", + "load_type": "fastchat", + "Huggingface": "TinyLlama/TinyLlama-1.1B-Chat-v1.0" + }, "fastchat-t5-3b-v1.0": { "path": "models/llm/fastchat-t5-3b-v1.0", "device": "auto", diff --git a/WebUI/webui_pages/model_configuration/configuration.py b/WebUI/webui_pages/model_configuration/configuration.py index 8e687e8..f4a8c0f 100644 --- a/WebUI/webui_pages/model_configuration/configuration.py +++ b/WebUI/webui_pages/model_configuration/configuration.py @@ -144,7 +144,7 @@ def configuration_page(api: ApiRequest, is_lite: bool = False): download_path = st.button( "Download", use_container_width=True, - disabled=True + disabled=disabled ) if download_path: with st.spinner(f"Model downloading..., Please do not perform any actions or refresh the page."): diff --git a/__webgui_server__.py b/__webgui_server__.py index cb15aca..22d6d44 100644 --- a/__webgui_server__.py +++ b/__webgui_server__.py @@ -430,19 +430,11 @@ def download_llm_model( hugg_path: str = Body("", description="huggingface path"), local_path: str = Body("", description="local path"), ) -> Dict: - # import gc - # from transformers import AutoModel, AutoTokenizer, AutoConfig - # try: - # tokenizer = AutoTokenizer.from_pretrained(hugg_path) - # tokenizer.save_pretrained(local_path) - # config = AutoConfig.from_pretrained(hugg_path) - # config.save_pretrained(local_path) - # model = AutoModel.from_pretrained(hugg_path) - # model.save_pretrained(local_path) - # del model - # gc.collect() - # return {"code": 200, "msg": f'Success download LLM model {model_name} to local path {local_path}.'} - # except Exception as e: + from huggingface_hub import snapshot_download + try: + path = snapshot_download(repo_id=hugg_path, local_dir=local_path, local_dir_use_symlinks=False) + return {"code": 200, "msg": f'Success download LLM model {model_name} to local path {local_path}.'} + except Exception as e: return {"code": 500, "msg": f'failed to download LLM model {model_name} to local path {local_path}.'} host = FSCHAT_CONTROLLER["host"] diff --git a/models/llm/TinyLlama-1.1B-Chat-v1.0/README.md b/models/llm/TinyLlama-1.1B-Chat-v1.0/README.md new file mode 100644 index 0000000..4e2614f --- /dev/null +++ b/models/llm/TinyLlama-1.1B-Chat-v1.0/README.md @@ -0,0 +1,61 @@ +--- +license: apache-2.0 +datasets: +- cerebras/SlimPajama-627B +- bigcode/starcoderdata +- HuggingFaceH4/ultrachat_200k +- HuggingFaceH4/ultrafeedback_binarized +language: +- en +widget: +- text: "<|system|>\nYou are a chatbot who can help code!\n<|user|>\nWrite me a function to calculate the first 10 digits 
of the fibonacci sequence in Python and print it out to the CLI.\n<|assistant|>\n" +--- +
+ +# TinyLlama-1.1B +
+
+https://github.com/jzhang38/TinyLlama
+
+The TinyLlama project aims to **pretrain** a **1.1B Llama model on 3 trillion tokens**. With proper optimization, this can be achieved in a span of "just" 90 days using 16 A100-40G GPUs 🚀🚀. Training started on 2023-09-01.
+
+We adopted exactly the same architecture and tokenizer as Llama 2. This means TinyLlama can be plugged into many open-source projects built upon Llama. TinyLlama is also compact, with only 1.1B parameters, which allows it to serve applications that demand a restricted computation and memory footprint.
+
+#### This Model
+This is the chat model finetuned on top of [TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T](https://huggingface.co/TinyLlama/TinyLlama-1.1B-intermediate-step-1431k-3T). **We follow [HF's Zephyr](https://huggingface.co/HuggingFaceH4/zephyr-7b-alpha)'s training recipe.** The model was initially fine-tuned on a variant of the [`UltraChat`](https://huggingface.co/datasets/stingning/ultrachat) dataset, which contains a diverse range of synthetic dialogues generated by ChatGPT.
+We then further aligned the model with [🤗 TRL's](https://github.com/huggingface/trl) `DPOTrainer` on the [openbmb/UltraFeedback](https://huggingface.co/datasets/openbmb/UltraFeedback) dataset, which contains 64k prompts and model completions ranked by GPT-4.
+
+#### How to use
+You will need `transformers>=4.34`.
+Check the [TinyLlama](https://github.com/jzhang38/TinyLlama) GitHub page for more information.
+
+```python
+# Install transformers from source - only needed for versions <= v4.34
+# pip install git+https://github.com/huggingface/transformers.git
+# pip install accelerate
+
+import torch
+from transformers import pipeline
+
+pipe = pipeline("text-generation", model="TinyLlama/TinyLlama-1.1B-Chat-v1.0", torch_dtype=torch.bfloat16, device_map="auto")
+
+# We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
+messages = [
+    {
+        "role": "system",
+        "content": "You are a friendly chatbot who always responds in the style of a pirate",
+    },
+    {"role": "user", "content": "How many helicopters can a human eat in one sitting?"},
+]
+prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
+outputs = pipe(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)
+print(outputs[0]["generated_text"])
+# <|system|>
+# You are a friendly chatbot who always responds in the style of a pirate.
+# <|user|>
+# How many helicopters can a human eat in one sitting?
+# <|assistant|>
+# ...
+```
\ No newline at end of file
diff --git a/models/llm/Yi-6B-Chat-4bits/README.md b/models/llm/Yi-6B-Chat-4bits/README.md
index 8e5da0c..3eaf3a5 100644
--- a/models/llm/Yi-6B-Chat-4bits/README.md
+++ b/models/llm/Yi-6B-Chat-4bits/README.md
@@ -26,63 +26,154 @@ pipeline_tag: text-generation
[HTML banner markup (centered Yi logo and badge-link rows); tags stripped — recoverable text below.]

Building the Next Generation of Open-Source and Bilingual LLMs

+🤗 Hugging Face • 🤖 ModelScope • ✡️ WiseModel

+👋 Join us 💬 WeChat (Chinese)!
-## Introduction -The **Yi** series models are large language models trained from scratch by -developers at [01.AI](https://01.ai/). + -## News +
+📕 Table of Contents + +- [🟢 What is Yi?](#-what-is-yi) + - [📌 Introduction](#-introduction) + - [🎯 Models](#-models) + - [Chat models](#chat-models) + - [Base models](#base-models) + - [Other info](#other-info) + - [🎉 News](#-news) +- [🟢 Why Yi?](#-why-yi) + - [🌎 Ecosystem](#-ecosystem) + - [💦 Upstream](#-upstream) + - [🌊 Downstream](#-downstream) + - [🔗 Serving](#-serving) + - [⚙️ Quantitation](#️-quantitation) + - [🛠️ Fine-tuning](#️-fine-tuning) + - [📌 Benchmarks](#-benchmarks) + - [📊 Base model performance](#-base-model-performance) + - [📊 Chat model performance](#-chat-model-performance) + - [📊 Quantized chat model performance](#-quantized-chat-model-performance) + - [⛔️ Limitations of chat model](#️-limitations-of-chat-model) +- [🟢 Who can use Yi?](#-who-can-use-yi) +- [🟢 How to use Yi?](#-how-to-use-yi) + - [1. Prepare development environment](#1-prepare-development-environment) + - [1.1 Docker](#11-docker) + - [1.2 Local development environment](#12-local-development-environment) + - [2. Download the model (optional)](#2-download-the-model-optional) + - [3. Examples](#3-examples) + - [3.1 Use the chat model](#31-use-the-chat-model) + - [3.2 Use the base model](#32-use-the-base-model) + - [3.3 Finetune from the base model](#33-finetune-from-the-base-model) + - [3.4 Quantization](#34-quantization) + - [GPT-Q](#gpt-q) + - [AWQ](#awq) +- [🟢 Misc.](#-misc) + - [📡 Disclaimer](#-disclaimer) + - [🪪 License](#-license) + +
+ +
+
+# 🟢 What is Yi?
+
+## 📌 Introduction
+
+- 🤖 The Yi series models are the next generation of open-source large language models trained from scratch by [01.AI](https://01.ai/).
+
+- 🙌 Targeted as a bilingual language model and trained on a 3T-token multilingual corpus, the Yi series models rank among the strongest LLMs worldwide, showing promise in language understanding, commonsense reasoning, reading comprehension, and more. For example:
+
+  - For English language capability, the Yi series models ranked 2nd (just behind GPT-4), outperforming other LLMs (such as LLaMA2-chat-70B, Claude 2, and ChatGPT) on the [AlpacaEval Leaderboard](https://tatsu-lab.github.io/alpaca_eval/) in Dec 2023.
+
+  - For Chinese language capability, the Yi series models landed in 2nd place (following GPT-4), surpassing other LLMs (such as Baidu ERNIE, Qwen, and Baichuan) on the [SuperCLUE](https://www.superclueai.com/) benchmark in Oct 2023.
+
+- 🙏 (Credits to LLaMA) Thanks to the Transformer and LLaMA open-source communities, which reduce the effort required to build from scratch and make it possible to use the same tools across the AI ecosystem. If you're interested in Yi's adoption of the LLaMA architecture and license usage policy, see [Yi's relation with LLaMA](https://github.com/01-ai/Yi/blob/main/docs/yi_relation_llama.md).
+
[ Back to top ⬆️ ]
+
+## 🎯 Models
+
+Yi models come in multiple sizes and cater to different use cases. You can also fine-tune Yi models to meet your specific requirements.
+
+### Chat models
+
+| Model | Download |
+|---|---|
+Yi-6B-Chat| • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-6B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-6B-Chat/summary)
+Yi-6B-Chat-4bits | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-6B-Chat-4bits) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-6B-Chat-4bits/summary)
+Yi-6B-Chat-8bits | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-6B-Chat-8bits) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-6B-Chat-8bits/summary)
+Yi-34B-Chat | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-34B-Chat) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-34B-Chat/summary)
+Yi-34B-Chat-4bits | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-34B-Chat-4bits) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-34B-Chat-4bits/summary)
+Yi-34B-Chat-8bits | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-34B-Chat-8bits) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-34B-Chat-8bits/summary)
+
+ - 4-bit series models are quantized by AWQ.
- 8-bit series models are quantized by GPTQ.
- All quantized models have a low barrier to use since they can be deployed on consumer-grade GPUs (e.g., 3090, 4090).
+
+### Base models
+
+| Model | Download |
+|---|---|
+Yi-6B| • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-6B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-6B/summary)
+Yi-6B-200K | • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-6B-200K) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-6B-200K/summary)
+Yi-34B| • [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-34B) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-34B/summary)
+Yi-34B-200K|• [🤗 Hugging Face](https://huggingface.co/01-ai/Yi-34B-200K) • [🤖 ModelScope](https://www.modelscope.cn/models/01ai/Yi-34B-200K/summary)
+
+ - A 200K context length is roughly equivalent to 400,000 Chinese characters.
+
+### Other info
+
+For chat models and base models:
+
+- 6B series models are suitable for personal and academic use.
+
+- 34B series models are suitable for personal, academic, and commercial use (particularly for small and medium-sized enterprises). They are a cost-effective option with emergent abilities.
+
+- The **default context window** is **4k tokens**.
+
+- The number of pretraining tokens is 3T.
+
+- The training data is current up to June 2023.
+
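As a quick sanity check after downloading, the sketch below loads one of the chat models listed above with 🤗 Transformers and its built-in chat template. It is illustrative only: the model id, dtype, and generation settings are examples, and the `-4bits`/`-8bits` variants additionally need the matching quantization backend installed (for example, `autoawq` for the 4-bit models), along with `torch` and `accelerate`.

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Any chat model from the table above can be substituted here; the 4-bit AWQ
# variants additionally require the autoawq package.
model_id = "01-ai/Yi-6B-Chat"

tokenizer = AutoTokenizer.from_pretrained(model_id, use_fast=False)
model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", torch_dtype="auto")

# Format the conversation with the tokenizer's chat template and generate a reply.
messages = [{"role": "user", "content": "hi"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output_ids = model.generate(input_ids.to(model.device), max_new_tokens=256)
print(tokenizer.decode(output_ids[0][input_ids.shape[1]:], skip_special_tokens=True))
```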
[ Back to top ⬆️ ]
+ +## 🎉 News + +
🎯 2023/11/23: The chat models are open to public. This release contains two chat models based on previous released base models, two 8-bits models quantized by GPTQ, two 4-bits models quantized by AWQ. @@ -96,15 +187,15 @@ This release contains two chat models based on previous released base models, tw You can try some of them interactively at: -- [HuggingFace](https://huggingface.co/spaces/01-ai/Yi-34B-Chat) +- [Hugging Face](https://huggingface.co/spaces/01-ai/Yi-34B-Chat) - [Replicate](https://replicate.com/01-ai)
-
+
🔔 2023/11/23: The Yi Series Models Community License Agreement is updated to v2.1.
-
+
🔥 2023/11/08: Invited test of Yi-34B chat model. Application form: @@ -131,28 +222,91 @@ sequence length and can be extended to 32K during inference time.
-## Ecosystem +
[ Back to top ⬆️ ]
+
+
+# 🟢 Why Yi?
+
+## 🌎 Ecosystem
+
+Yi has a comprehensive ecosystem, offering a range of tools, services, and models to enrich your experience and maximize productivity.
+
+- [💦 Upstream](#-upstream)
+- [🌊 Downstream](#-downstream)
+  - [🔗 Serving](#-serving)
+  - [⚙️ Quantitation](#️-quantitation)
+  - [🛠️ Fine-tuning](#️-fine-tuning)
+
+### 💦 Upstream
+
+The Yi series models follow the same model architecture as LLaMA. By choosing Yi, you can leverage existing tools, libraries, and resources within the LLaMA ecosystem, eliminating the need to create new tools and enhancing development efficiency.
+
+For example, the Yi series models are saved in the format of the LLaMA model, so you can directly use `LlamaForCausalLM` and `LlamaTokenizer` to load them. For more information, see [Use the chat model](#31-use-the-chat-model).
+
+```python
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+tokenizer = AutoTokenizer.from_pretrained("01-ai/Yi-34b", use_fast=False)
+
+model = AutoModelForCausalLM.from_pretrained("01-ai/Yi-34b", device_map="auto")
+```
+
+### 🌊 Downstream

-🤗 You are encouraged to create a PR and share your awesome work built on top of
-the Yi series models.
+> 💡 Tip
+>
+> - Feel free to create a PR and share the fantastic work you've built using the Yi series models.
+>
+> - To help others quickly understand your work, it is recommended to use the format of `model name: brief description`.

-- Serving
-  - [ScaleLLM](https://github.com/vectorch-ai/ScaleLLM#supported-models): Efficiently run Yi models locally.
-- Quantization
-  - [TheBloke/Yi-34B-GGUF](https://huggingface.co/TheBloke/Yi-34B-GGUF)
-  - [TheBloke/Yi-34B-GPTQ](https://huggingface.co/TheBloke/Yi-34B-GPTQ)
-- Finetuning
-  - [NousResearch/Nous-Capybara-34B](https://huggingface.co/NousResearch/Nous-Capybara-34B)
-  - [SUSTech/SUS-Chat-34B](https://huggingface.co/SUSTech/SUS-Chat-34B): This
-    model ranks first among all models below 70B and has outperformed the twice
-    larger
-    [deepseek-llm-67b-chat](https://huggingface.co/deepseek-ai/deepseek-llm-67b-chat).
-    You can check the result in [🤗 Open LLM
-    Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard).
+#### 🔗 Serving

-## Model Performance
+If you want to get up and running with Yi in a few minutes, you can use the following services built upon Yi.

-### Base Model Performance
+- [Yi-34B-Chat](https://platform.lingyiwanwu.com/) (Yi official beta): you can chat with it. **Note** that currently it's available through a whitelist. Welcome to apply (fill out a form in [English](https://cn.mikecrm.com/l91ODJf) or [Chinese](https://cn.mikecrm.com/gnEZjiQ)) and experience it firsthand!
+
+- [Yi-6B-Chat (Replicate)](https://replicate.com/01-ai): you can use this model with more options by setting additional parameters and calling APIs.
+
+- [ScaleLLM](https://github.com/vectorch-ai/ScaleLLM#supported-models): you can use this service to run Yi models locally with added flexibility and customization.
+
+#### ⚙️ Quantitation
+
+If you have limited computational capabilities, you can use Yi's quantized models as follows.
+
+These quantized models have reduced precision but offer increased efficiency, such as faster inference speed and lower RAM usage.
+ +- [TheBloke/Yi-34B-GPTQ](https://huggingface.co/TheBloke/Yi-34B-GPTQ) +- [TheBloke/Yi-34B-GGUF](https://huggingface.co/TheBloke/Yi-34B-GGUF) +- [TheBloke/Yi-34B-AWQ](https://huggingface.co/TheBloke/Yi-34B-AWQ) + +#### 🛠️ Fine-tuning + +If you're seeking to explore the diverse capabilities within Yi's thriving family, you can delve into Yi's fine-tuned models as below. + +- [TheBloke Models](https://huggingface.co/TheBloke): this site hosts numerous fine-tuned models derived from various LLMs including Yi. + + This is not an exhaustive list for Yi, but to name a few sorted on downloads: + - [TheBloke/dolphin-2_2-yi-34b-AWQ](https://huggingface.co/TheBloke/dolphin-2_2-yi-34b-AWQ) + - [TheBloke/Yi-34B-Chat-AWQ](https://huggingface.co/TheBloke/Yi-34B-Chat-AWQ) + - [TheBloke/Yi-34B-Chat-GPTQ](https://huggingface.co/TheBloke/Yi-34B-Chat-GPTQ) + +- [SUSTech/SUS-Chat-34B](https://huggingface.co/SUSTech/SUS-Chat-34B): this model ranked first among all models below 70B and outperformed the twice larger deepseek-llm-67b-chat. You can check the result on the [Open LLM Leaderboard](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard). + +- [OrionStarAI/OrionStar-Yi-34B-Chat-Llama](https://huggingface.co/OrionStarAI/OrionStar-Yi-34B-Chat-Llama): this model excelled beyond other models (such as GPT-4, Qwen-14B-Chat, Baichuan2-13B-Chat) in C-Eval and CMMLU evaluations on the [OpenCompass LLM Leaderboard](https://opencompass.org.cn/leaderboard-llm). + +- [NousResearch/Nous-Capybara-34B](https://huggingface.co/NousResearch/Nous-Capybara-34B): this model is trained with 200K context length and 3 epochs on the Capybara dataset. + + + + +## 📌 Benchmarks + +- [📊 Base model performance](#-base-model-performance) +- [📊 Chat model performance](#-chat-model-performance) +- [📊 Quantized chat model performance](#-quantized-chat-model-performance) +- [⛔️ Limitations of chat model](#️-limitations-of-chat-model) + +### 📊 Base model performance | Model | MMLU | CMMLU | C-Eval | GAOKAO | BBH | Common-sense Reasoning | Reading Comprehension | Math & Code | | :------------ | :------: | :------: | :------: | :------: | :------: | :--------------------: | :-------------------: | :---------: | @@ -193,7 +347,7 @@ is derived by averaging the scores on the remaining tasks. Since the scores for these two tasks are generally lower than the average, we believe that Falcon-180B's performance was not underestimated. -### Chat Model Performance +### 📊 Chat model performance | Model | MMLU | MMLU | CMMLU | CMMLU | C-Eval(val)* | C-Eval(val)* | Truthful QA | BBH | BBH | GSM8k | GSM8k | | ----------------------- | --------- | --------- | --------- | --------- | ----------------------- | ----------------------- | ----------- | --------- | --------- | --------- | --------- | @@ -215,7 +369,7 @@ We evaluated various benchmarks using both zero-shot and few-shot methods, excep *: C-Eval results are evaluated on the validation datasets -### Quantized Chat Model Performance +### 📊 Quantized chat model performance We also provide both 4-bit (AWQ) and 8-bit (GPTQ) quantized Yi chat models. Evaluation results on various benchmarks have shown that the quantized models have negligible losses. Additionally, they reduce the memory footprint size. After testing different configurations of prompts and generation lengths, we highly recommend following the guidelines in the memory footprint table below when selecting a device to run our models. @@ -230,7 +384,7 @@ We also provide both 4-bit (AWQ) and 8-bit (GPTQ) quantized Yi chat models. 
Eval Note: All the numbers in the table represent the minimum recommended memory for running models of the corresponding size. -### Limitations of Chat Model +### ⛔️ Limitations of chat model The released chat model has undergone exclusive training using Supervised Fine-Tuning (SFT). Compared to other standard chat models, our model produces more diverse responses, making it suitable for various downstream tasks, such as creative scenarios. Furthermore, this diversity is expected to enhance the likelihood of generating higher quality responses, which will be advantageous for subsequent Reinforcement Learning (RL) training. @@ -242,12 +396,24 @@ However, this higher diversity might amplify certain existing issues, including: To achieve more coherent and consistent responses, it is advisable to adjust generation configuration parameters such as`temperature`,`top_p`, or`top_k`. These adjustments can help in the balance between creativity and coherence in the model's outputs. + + + +# 🟢 Who can use Yi? + +Everyone! 🙌 ✅ + +- The Yi series models are free for personal usage, academic purposes, and commercial use. All usage must adhere to the [Yi Series Models Community License Agreement 2.1](https://github.com/01-ai/Yi/blob/main/MODEL_LICENSE_AGREEMENT.txt) + +- For free commercial use, you only need to [complete this form](https://www.lingyiwanwu.com/yi-license) to get Yi Model Commercial License. + -## Usage +# 🟢 How to use Yi? -Feel free to [create an issue](https://github.com/01-ai/Yi/issues/new) if you -encounter any problem when using the **Yi** series models. +[1. Prepare development environment](#1-prepare-development-environment) +
[2. Download the model](#2-download-the-model-optional) +
[3. Examples](#3-examples) ### 1. Prepare development environment @@ -272,7 +438,7 @@ To install the dependencies, please follow these steps: ### 2. Download the model (optional) By default, the model weights and tokenizer will be downloaded from -[HuggingFace](https://huggingface.co/01-ai) automatically in the next step. You +[Hugging Face](https://huggingface.co/01-ai) automatically in the next step. You can also download them manually from the following places: - [ModelScope](https://www.modelscope.cn/organization/01ai/) @@ -377,7 +543,7 @@ The Arctic is a place of great beauty. The ice and snow are a For more advanced usage, please refer to the [doc](https://github.com/01-ai/Yi/tree/main/demo). -#### 3.3 Finetuning from the base model: +#### 3.3 Finetune from the base model ```bash bash finetune/scripts/run_sft_Yi_6b.sh @@ -430,15 +596,11 @@ python quantization/awq/eval_quantized_model.py \ For more detailed explanation, please read the [doc](https://github.com/01-ai/Yi/tree/main/quantization/awq) -## FAQ + -1. **What dataset was this trained with?** +# 🟢 Misc. - The dataset we use contains Chinese & English only. We used approximately 3T - tokens. The detailed number and its construction will be described in the - upcoming technical report. - -## Disclaimer +### 📡 Disclaimer We use data compliance checking algorithms during the training process, to ensure the compliance of the trained model to the best of our ability. Due to @@ -449,12 +611,15 @@ problematic outputs. We will not be responsible for any risks and issues resulting from misuse, misguidance, illegal usage, and related misinformation, as well as any associated data security concerns. -## License + + + +### 🪪 License The source code in this repo is licensed under the [Apache 2.0 license](https://github.com/01-ai/Yi/blob/main/LICENSE). The Yi series models are fully open for academic research and free commercial usage with permission -via applications. All usage must adhere to the [Model License -Agreement 2.0](https://github.com/01-ai/Yi/blob/main/MODEL_LICENSE_AGREEMENT.txt). -To apply for the official commercial license, please contact us -([yi@01.ai](mailto:yi@01.ai)). +via applications. All usage must adhere to the [Yi Series Models Community License Agreement 2.1](https://github.com/01-ai/Yi/blob/main/MODEL_LICENSE_AGREEMENT.txt). +For free commercial use, you only need to send an email to [get official commercial permission](https://www.lingyiwanwu.com/yi-license). + + diff --git a/webui-startup.bat b/webui-startup.bat index 8f8749b..4013bcb 100644 --- a/webui-startup.bat +++ b/webui-startup.bat @@ -4,6 +4,6 @@ REM Task 1: Start conda environment and run Python web server start cmd /k "cd /d %~dp0 && conda activate keras-llm-robot && python __webgui_server__.py --webui" REM Task 2: Start SSL proxy -start cmd /k "cd /d %~dp0tools && ssl-proxy -from 0.0.0.0:4430 -to 127.0.0.1:8818" +start cmd /k "cd /d %~dp0tools && ssl-proxy -from 0.0.0.0:4480 -to 127.0.0.1:8818" exit \ No newline at end of file
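For reference, the `download_llm_model` handler added in `__webgui_server__.py` above is a thin wrapper around `huggingface_hub.snapshot_download`, so the download path can be exercised outside the web UI with a short standalone script. The sketch below is illustrative only: the repo id and target directory simply mirror the TinyLlama entry added to `webuiconfig.json` in this patch.

```python
from huggingface_hub import snapshot_download

# Mirrors the server-side call introduced by this patch: fetch a full model
# snapshot into the local models directory the web UI expects (paths are examples).
local_path = snapshot_download(
    repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
    local_dir="models/llm/TinyLlama-1.1B-Chat-v1.0",
    local_dir_use_symlinks=False,
)
print(f"Model files downloaded to {local_path}")
```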