From 5bf1a58d5b4fe9ac4870fd52bd38040413d73b7d Mon Sep 17 00:00:00 2001 From: alannikos Date: Sat, 11 Jan 2025 13:59:13 +0800 Subject: [PATCH] update readme.md --- README.md | 7 + build/lib/edg4llm/__init__.py | 7 + build/lib/edg4llm/core/__init__.py | 1 + build/lib/edg4llm/core/dataGenerators.py | 253 +++++++++++++ build/lib/edg4llm/core/interface.py | 333 ++++++++++++++++++ build/lib/edg4llm/core/pipeline.py | 88 +++++ build/lib/edg4llm/generators/__init__.py | 0 .../generators/text_generators/__init__.py | 0 .../text_generators/answer_generator.py | 191 ++++++++++ .../text_generators/base_generator.py | 131 +++++++ .../text_generators/dialogue_generator.py | 159 +++++++++ .../text_generators/question_generator.py | 151 ++++++++ build/lib/edg4llm/models/__init__.py | 0 build/lib/edg4llm/models/baseModel.py | 126 +++++++ build/lib/edg4llm/models/chatglm.py | 273 ++++++++++++++ build/lib/edg4llm/models/chatgpt.py | 286 +++++++++++++++ build/lib/edg4llm/models/deepseek.py | 294 ++++++++++++++++ build/lib/edg4llm/models/internlm.py | 281 +++++++++++++++ build/lib/edg4llm/processor/__init__.py | 0 build/lib/edg4llm/processor/postprocess.py | 231 ++++++++++++ build/lib/edg4llm/processor/preprocess.py | 139 ++++++++ build/lib/edg4llm/utils/__init__.py | 0 build/lib/edg4llm/utils/config.py | 8 + build/lib/edg4llm/utils/data_utils.py | 157 +++++++++ build/lib/edg4llm/utils/exceptions.py | 35 ++ build/lib/edg4llm/utils/logger.py | 104 ++++++ build/lib/edg4llm/utils/template.py | 113 ++++++ dist/edg4llm-1.0.14-py3-none-any.whl | Bin 0 -> 42481 bytes dist/edg4llm-1.0.14.tar.gz | Bin 0 -> 31317 bytes edg4llm.egg-info/PKG-INFO | 281 +++++++++++++++ edg4llm.egg-info/SOURCES.txt | 35 ++ edg4llm.egg-info/dependency_links.txt | 1 + edg4llm.egg-info/not-zip-safe | 1 + edg4llm.egg-info/requires.txt | 1 + edg4llm.egg-info/top_level.txt | 1 + 35 files changed, 3688 insertions(+) create mode 100644 build/lib/edg4llm/__init__.py create mode 100644 build/lib/edg4llm/core/__init__.py create mode 100644 build/lib/edg4llm/core/dataGenerators.py create mode 100644 build/lib/edg4llm/core/interface.py create mode 100644 build/lib/edg4llm/core/pipeline.py create mode 100644 build/lib/edg4llm/generators/__init__.py create mode 100644 build/lib/edg4llm/generators/text_generators/__init__.py create mode 100644 build/lib/edg4llm/generators/text_generators/answer_generator.py create mode 100644 build/lib/edg4llm/generators/text_generators/base_generator.py create mode 100644 build/lib/edg4llm/generators/text_generators/dialogue_generator.py create mode 100644 build/lib/edg4llm/generators/text_generators/question_generator.py create mode 100644 build/lib/edg4llm/models/__init__.py create mode 100644 build/lib/edg4llm/models/baseModel.py create mode 100644 build/lib/edg4llm/models/chatglm.py create mode 100644 build/lib/edg4llm/models/chatgpt.py create mode 100644 build/lib/edg4llm/models/deepseek.py create mode 100644 build/lib/edg4llm/models/internlm.py create mode 100644 build/lib/edg4llm/processor/__init__.py create mode 100644 build/lib/edg4llm/processor/postprocess.py create mode 100644 build/lib/edg4llm/processor/preprocess.py create mode 100644 build/lib/edg4llm/utils/__init__.py create mode 100644 build/lib/edg4llm/utils/config.py create mode 100644 build/lib/edg4llm/utils/data_utils.py create mode 100644 build/lib/edg4llm/utils/exceptions.py create mode 100644 build/lib/edg4llm/utils/logger.py create mode 100644 build/lib/edg4llm/utils/template.py create mode 100644 dist/edg4llm-1.0.14-py3-none-any.whl create mode 
100644 dist/edg4llm-1.0.14.tar.gz create mode 100644 edg4llm.egg-info/PKG-INFO create mode 100644 edg4llm.egg-info/SOURCES.txt create mode 100644 edg4llm.egg-info/dependency_links.txt create mode 100644 edg4llm.egg-info/not-zip-safe create mode 100644 edg4llm.egg-info/requires.txt create mode 100644 edg4llm.egg-info/top_level.txt diff --git a/README.md b/README.md index 5bc9f17..edd6e05 100644 --- a/README.md +++ b/README.md @@ -3,6 +3,13 @@
![welcome](assets/welcome.png) + +
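The README hunk above only adds the welcome banner; the package bundled under `build/lib/` in this patch is driven through a single `EDG4LLM` entry point. A minimal usage sketch, assembled from the docstring examples in `interface.py` further down in this patch (the base URL, API key, and prompts are illustrative placeholders, not working values):

    from edg4llm import EDG4LLM

    # The URL and key below are placeholders; supply your provider's real values.
    generator = EDG4LLM(
        model_provider="chatglm",
        model_name="chatglm-4-flash",
        base_url="https://api.example.com",
        api_key="your_api_key",
    )

    # task_type may be 'question', 'answer', or 'dialogue' (the default);
    # 'answer' additionally expects a question_path per dataGenerators.py.
    response = generator.generate(
        task_type="dialogue",
        system_prompt="You are a helpful assistant.",
        user_prompt="What is the weather today?",
        num_samples=10,
    )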
diff --git a/build/lib/edg4llm/__init__.py b/build/lib/edg4llm/__init__.py new file mode 100644 index 0000000..1cdaf9f --- /dev/null +++ b/build/lib/edg4llm/__init__.py @@ -0,0 +1,7 @@ +from edg4llm.core.interface import EDG4LLM + +__all__ = ["EDG4LLM"] + +__version__ = "1.0.14" +__author__ = "Alannikos" +__license__ = "MIT" diff --git a/build/lib/edg4llm/core/__init__.py b/build/lib/edg4llm/core/__init__.py new file mode 100644 index 0000000..3f40e95 --- /dev/null +++ b/build/lib/edg4llm/core/__init__.py @@ -0,0 +1 @@ +from edg4llm.core.interface import EDG4LLM diff --git a/build/lib/edg4llm/core/dataGenerators.py b/build/lib/edg4llm/core/dataGenerators.py new file mode 100644 index 0000000..12bb8cf --- /dev/null +++ b/build/lib/edg4llm/core/dataGenerators.py @@ -0,0 +1,253 @@ +import os +from typing import Dict, List, Any + +from edg4llm.utils.logger import custom_logger +from edg4llm.models.chatglm import EDGChatGLM +from edg4llm.models.chatgpt import EDGChatGPT +from edg4llm.models.internlm import EDGInternLM +from edg4llm.models.deepseek import EDGDeepSeek +from edg4llm.generators.text_generators.answer_generator import AnswerGenerator +from edg4llm.generators.text_generators.question_generator import QuestionGenerator +from edg4llm.generators.text_generators.dialogue_generator import DialogueGenerator + +from edg4llm.processor.preprocess import PreProcessor + +logger = custom_logger("dataGenerator") + +class DataGenerator: + def __init__(self, pConfig): + """ + Initialize the Data Generator + + This method initializes the model and its associated generators (Answer, Question, Dialogue) + based on the provided configuration parameters. + + Parameters + ---------- + pConfig : dict + A configuration dictionary containing the following key-value pairs: + - "model_provider" : str, optional + The type of language model to use ("chatglm", "chatgpt", "internlm", "deepseek"). Default is "chatglm". + - "model_name" : str, optional + The specific model to use within the selected provider. Default is "chatglm-4-flash". + - "base_url" : str + The base URL for the LLM API. Default is None. + - "api_key" : str + The API key for authenticating requests. Default is None. + + Raises + ------ + ValueError + If the provided model type is not supported, raises a `ValueError`. + + Attributes + ---------- + model : object + The selected language model instance, initialized based on the "model_provider" configuration. + answer_generator : AnswerGenerator + An instance of the AnswerGenerator to generate answers. + question_generator : QuestionGenerator + An instance of the QuestionGenerator to generate questions. + dialogue_generator : DialogueGenerator + An instance of the DialogueGenerator to generate dialogues. + + Notes + ----- + - Supported model providers include: "chatglm", "chatgpt", "internlm", "deepseek". + - If the "model_provider" is unsupported, a `ValueError` will be raised. 
+ """ + + if pConfig["model_provider"] == "chatglm": + self.model = EDGChatGLM( + model_name=pConfig["model_name"], + base_url=pConfig["base_url"], + api_key=pConfig["api_key"] + ) + elif pConfig["model_provider"] == "chatgpt": + self.model = EDGChatGPT( + model_name=pConfig["model_name"], + base_url=pConfig["base_url"], + api_key=pConfig["api_key"] + ) + elif pConfig["model_provider"] == "internlm": + self.model = EDGInternLM( + model_name=pConfig["model_name"], + base_url=pConfig["base_url"], + api_key=pConfig["api_key"] + ) + elif pConfig["model_provider"] == "deepseek": + self.model = EDGDeepSeek( + model_name=pConfig["model_name"], + base_url=pConfig["base_url"], + api_key=pConfig["api_key"] + ) + else: + raise ValueError("Unsupported model provider") + + self.preprocessor = PreProcessor() + self.answer_generator = AnswerGenerator(self.model) + self.question_generator = QuestionGenerator(self.model) + self.dialogue_generator = DialogueGenerator(self.model) + + def generate_question(self, tConfig) -> List[Dict]: + """ + Generate questions based on the given configuration. + + This method uses the `question_generator` to generate question data based on + the provided configuration options. It supports various parameters to control + the question generation process, such as task type, prompts, sampling strategies, and output formatting. + + Parameters + ---------- + tConfig : dict + A configuration dictionary containing the following key-value pairs: + - "language" : str, optional + The language of data in data generation. Must be one of 'zh', 'en'. + Default is 'zh'. + - "task_type" : str, optional + The type of task for data generation. Must be 'question' to ensure valid output. + Default is 'question'. + - "system_prompt" : str, optional + A system-level prompt to guide the question generation. Default is None. + - "user_prompt" : str, optional + A user-provided prompt to initiate the question generation. Default is None. + - "do_sample" : bool, optional + Whether to use sampling during question generation. If True, enables sampling strategies like + temperature and top_p. If False, uses deterministic decoding. Default is True. + - "temperature" : float, optional + Sampling temperature to control randomness. Must be in the range [0.0, 1.0]. + Default is 0.95. + - "top_p" : float, optional + Nucleus sampling parameter for controlling randomness. Must be in the range [0.0, 1.0]. Default is 0.7. + - "max_tokens" : int, optional + The maximum number of tokens to generate in the question output. Default is 4095. + - "num_samples" : int, optional + The number of question samples to generate. Default is 10. + - "output_format" : str, optional + The format of the output, such as "alpaca" or other formats. Default is "alpaca". + + Returns + ------- + list of dict + A list of dictionaries containing the generated question outputs. + + Notes + ----- + - This method uses the `generate` method from the `question_generator` to produce question data + based on the provided configuration. + - The `tConfig` dictionary allows for flexible question generation based on task type, + system/user prompts, and various sampling strategies. + """ + + tConfig["user_prompt"] = self.preprocessor.question_preprocess(tConfig["language"], tConfig["user_prompt"]) + + data = self.question_generator.generate(tConfig) + return data + + def generate_answer(self, tConfig) -> List[Dict]: + """ + Generate answers based on the given configuration. 
+ + This method uses the `answer_generator` to generate answer data based on + the provided configuration options. It supports various parameters to control + the answer generation process, such as task type, prompts, sampling strategies, and output formatting. + + Parameters + ---------- + tConfig : dict + A configuration dictionary containing the following key-value pairs: + - "language" : str, optional + The language of data in data generation. Must be one of 'zh', 'en'. + Default is 'zh'. + - "task_type" : str, optional + The type of task for data generation. Must be 'answer' to ensure valid output. + Default is 'answer'. + - "system_prompt" : str, optional + A system-level prompt to guide the answer generation. Default is None. + - "user_prompt" : str, optional + A user-provided prompt to initiate the answer generation. Default is None. + - "do_sample" : bool, optional + Whether to use sampling during answer generation. If True, enables sampling strategies like + temperature and top_p. If False, uses deterministic decoding. Default is True. + - "temperature" : float, optional + Sampling temperature to control randomness. Must be in the range [0.0, 1.0]. + Default is 0.95. + - "top_p" : float, optional + Nucleus sampling parameter for controlling randomness. Must be in the range [0.0, 1.0]. Default is 0.7. + - "max_tokens" : int, optional + The maximum number of tokens to generate in the answer output. Default is 4095. + - "num_samples" : int, optional + The number of answer samples to generate. Default is 10. + - "output_format" : str, optional + The format of the output, such as "json" or other formats. Default is "json". + + Returns + ------- + list of dict + A list of dictionaries containing the generated answer outputs. + + Notes + ----- + - This method uses the `generate` method from the `answer_generator` to produce answer data + based on the provided configuration. + - The `tConfig` dictionary allows for flexible answer generation based on task type, + system/user prompts, and various sampling strategies. + """ + + tConfig["user_prompt"] = self.preprocessor.answer_preprocess(tConfig["language"], tConfig["user_prompt"]) + data = self.answer_generator.generate(tConfig) + return data + + def generate_dialogue(self, tConfig) -> List[Dict]: + """ + Generate a dialogue based on the given configuration. + + This method utilizes the `dialogue_generator` to generate dialogues using the + provided configuration options. It supports various parameters to control + the text generation process, such as task type, prompts, sampling strategies, and output formatting. + + Parameters + ---------- + tConfig : dict + A configuration dictionary containing the following key-value pairs: + - "language" : str, optional + The language of data in data generation. Must be one of 'zh', 'en'. + Default is 'zh'. + - "task_type" : str, optional + The type of task for data generation. Must be one of 'question', 'answer', or 'dialogue'. + Default is 'dialogue'. + - "system_prompt" : str, optional + A system-level prompt to guide the text generation. Default is None. + - "user_prompt" : str, optional + A user-provided prompt to initiate the text generation. Default is None. + - "do_sample" : bool, optional + Whether to use sampling during text generation. If True, enables sampling strategies like temperature + and top_p. If False, uses deterministic decoding. Default is True. + - "temperature" : float, optional + Sampling temperature to control randomness. Must be in the range [0.0, 1.0]. + Default is 0.95. 
+ - "top_p" : float, optional + Nucleus sampling parameter for controlling randomness. Must be in the range [0.0, 1.0]. Default is 0.7. + - "max_tokens" : int, optional + The maximum number of tokens to generate in the output. Default is 4095. + - "num_samples" : int, optional + The number of output samples to generate. Default is 10. + - "output_format" : str, optional + The format of the output. Default is "alpaca". + + Returns + ------- + list of dict + A list of dictionaries containing the generated dialogue outputs. + + Notes + ----- + - This method uses the `generate` method from the `dialogue_generator` to produce dialogue outputs + based on the provided configuration. + - The `tConfig` dictionary allows for flexible generation based on task type, system/user prompts, + and various sampling strategies. + """ + + tConfig["user_prompt"] = self.preprocessor.dialogue_preprocess(tConfig["language"], tConfig["user_prompt"]) + data = self.dialogue_generator.generate(tConfig) + return data \ No newline at end of file diff --git a/build/lib/edg4llm/core/interface.py b/build/lib/edg4llm/core/interface.py new file mode 100644 index 0000000..64fe88d --- /dev/null +++ b/build/lib/edg4llm/core/interface.py @@ -0,0 +1,333 @@ +""" +EDG4LLM: A Comprehensive Interface for Text Generation with Configurable LLMs + +Overview +-------- +The EDG4LLM class serves as a high-level interface for generating text using a language model pipeline. +It supports configuration for task types, prompts, sampling strategies, and output formats, making it versatile +and adaptable to various use cases. + +Key Features +------------ +- Task Flexibility: Supports task types such as 'dialogue', 'question', and 'answer'. +- Custom Prompts: Allows system-level and user-level prompts to guide the generation process. +- Sampling Controls: Provides options to customize randomness and diversity of outputs using + parameters like `do_sample`, `temperature`, and `top_p`. +- Output Formats: Compatible with customizable output formats, such as "alpaca". +""" + + + +import os +from typing import Any, Tuple, Dict + +from edg4llm.utils.logger import custom_logger +from edg4llm.core.pipeline import DataPipeline + +logger = custom_logger("interface") + + +class EDG4LLM: + """ + EDG4LLM: A Class for Configurable Text Generation with LLMs + + This class provides an interface for generating text using a configurable language model pipeline. + It allows users to specify a variety of parameters, including model type, prompts, sampling strategies, + and output formats. + + Attributes + ---------- + pipeline : DataPipeline + An instance of the `DataPipeline` class, used to handle the data processing + and interaction with the language model. + + Methods + ------- + __init__(model_provider: str = "chatglm", model_name: str = "chatglm-4-flash", base_url: str = None, api_key: str = None): + Initializes the EDG4LLM instance with the model type, base URL, and API key. + + generate(task_type: str = 'dialogue', system_prompt: str = None, user_prompt: str = None, + do_sample: bool = True, temperature: float = 0.95, top_p: float = 0.7, + max_tokens: int = 4095, num_samples: int = 10, output_format: str = "alpaca") -> List[Dict]: + Generates text data based on the provided configuration. + + Notes + ----- + - This class leverages the `DataPipeline` for all interactions with the language model. + - The `generate` method is user-facing. + - Supports customization for tasks like 'dialogue', 'question', and 'answer'. 
+ - Ensures compatibility with different output formats (e.g., "alpaca"). + + Examples + -------- + >>> # Create an instance of EDG4LLM + >>> generator = EDG4LLM(model_provider="chatglm", model_name="chatglm-4-flash", base_url="https://api.example.com", api_key="your_api_key") + + >>> # Generate a dialogue response + >>> response = generator.generate( + task_type="answer", + system_prompt="You are a helpful assistant.", + user_prompt="What is the weather today?", + max_tokens=100 + ) + + >>> print(response) + Output: [{'output': 'The weather today is sunny with a high of 25°C.'}] + """ + def __init__(self, + model_provider: str = "chatglm", + model_name: str = "chatglm-4-flash", + base_url: str = None, + api_key: str = None): + """ + Initialize the EDG4LLM instance with the necessary parameters. + + Parameters + ---------- + model_provider: str, optional + The type of language model to use, by default "chatglm". + model_name : str, optional + The specific model to use within the model type, by default "chatglm-4-flash". + base_url : str, optional + The base URL of the LLM API, by default None. + api_key : str, optional + The API key for authenticating requests, by default None. + """ + + self._pConfig = { + "model_provider": model_provider + ,"model_name" : model_name + , "base_url": base_url + , "api_key" : api_key + } + + self.pipeline = DataPipeline(self._pConfig) + logger.info("DataPipeline initialized successfully with the provided configuration.") + + def generate(self + , language: str = 'zh' + , task_type: str = 'dialogue' + , system_prompt: str = None + , user_prompt: str = None + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + , num_samples: int = 10 + , output_format: str = "alpaca" + , question_path: str = None + ): + """ + Generate text data based on the specified configuration. + + Parameters + ---------- + language : str, optional + The language of data in data generation. Must be one of 'zh', 'en'. + Default is 'zh'. + + task_type : str, optional + The type of task for data generation. Must be one of 'question', 'answer', or 'dialogue'. + Default is 'dialogue'. + + system_prompt : str, optional + A system-level prompt to guide the text generation. + Default is None. + + user_prompt : str, optional + A user-provided prompt to initiate the text generation. + Default is None. + + do_sample : bool, optional + Whether to use sampling during text generation. + - If True, enables sampling strategies like temperature and top_p. + - If False, uses deterministic decoding (e.g., greedy decoding), and + `temperature` and `top_p` are ignored. + Default is True. + + temperature : float, optional + Sampling temperature to control randomness. + - Must be a positive number in the range [0.0, 1.0]. + - Higher values produce more diverse outputs, while lower values make + the output more focused and deterministic. + Default is 0.95. + + top_p : float, optional + Nucleus sampling parameter for controlling randomness. + - Limits token selection to the top cumulative probability range + defined by p. + - Must be in the range [0.0, 1.0]. + Default is 0.7. + + max_tokens : int, optional + The maximum number of tokens to generate in the output. + - Default: 4095. + - Maximum allowed value: 4095 (values exceeding this will be capped). + + num_samples : int, optional + The number of output samples to generate. + Default is 10. + + output_format : str, optional + The format of the output. + Default is "alpaca". 
+ + question_path : str, optional + The path to a file containing a list of questions. + - Only applicable when `task_type` is set to 'answer'. + - The model will read the file and generate answers for each question in the file. + - The output will be returned in a specific format as defined by the `output_format` parameter. + Default is None. + + Returns + ------- + list of dict + A list of dictionaries containing the generated outputs. + + Examples + -------- + >>> # Create an instance of EDG4LLM + >>> generator = EDG4LLM(model_provider="chatglm", model_name="chatglm-4-flash", base_url="https://api.example.com", api_key="your_api_key") + + >>> # Generate a dialogue response + >>> response = generator.generate( + task_type="answer", + system_prompt="You are a helpful assistant.", + user_prompt="What is the weather today?", + max_tokens=100 + ) + + >>> print(response) + Output: [{'output': 'The weather today is sunny with a high of 25°C.'}] + + Notes + ----- + The method will use a pipeline's `generate_data` function to create outputs + based on the provided configuration. + """ + + data = self._generate(language, task_type, system_prompt, user_prompt, do_sample, temperature, top_p, max_tokens, num_samples, output_format, question_path) + logger.info("Data generation completed successfully for task_type: %s", task_type) + + return data + + def _generate(self, + language: str = 'zh', + task_type: str = 'dialogue', + system_prompt: str = None, + user_prompt: str = None, + do_sample: bool = True, + temperature: float = 0.95, + top_p: float = 0.7, + max_tokens: int = 4095, + num_samples: int = 10, + output_format: str = "alpaca", + question_path: str = None + ): + """ + Generate text data based on the specified configuration. + + Parameters + ---------- + language : str, optional + The language of data in data generation. Must be one of 'zh', 'en'. + Default is 'zh'. + + task_type : str, optional + The type of task for data generation. Must be one of 'question', 'answer', or 'dialogue'. + Default is 'dialogue'. + + system_prompt : str, optional + A system-level prompt to guide the text generation. + Default is None. + + user_prompt : str, optional + A user-provided prompt to initiate the text generation. + Default is None. + + do_sample : bool, optional + Whether to use sampling during text generation. + - If True, enables sampling strategies like temperature and top_p. + - If False, uses deterministic decoding (e.g., greedy decoding), and + `temperature` and `top_p` are ignored. + Default is True. + + temperature : float, optional + Sampling temperature to control randomness. + - Must be a positive number in the range [0.0, 1.0]. + - Higher values produce more diverse outputs, while lower values make + the output more focused and deterministic. + Default is 0.95. + + top_p : float, optional + Nucleus sampling parameter for controlling randomness. + - Limits token selection to the top cumulative probability range + defined by p. + - Must be in the range [0.0, 1.0]. + Default is 0.7. + + max_tokens : int, optional + The maximum number of tokens to generate in the output. + - Default: 4095. + - Maximum allowed value: 4095 (values exceeding this will be capped). + + num_samples : int, optional + The number of output samples to generate. + Default is 10. + + output_format : str, optional + The format of the output. + Default is "alpaca". + + question_path : str, optional + The path to a file containing a list of questions. + - Only applicable when `task_type` is set to 'answer'. 
+ - The model will read the file and generate answers for each question in the file. + - The output will be returned in a specific format as defined by the `output_format` parameter. + Default is None. + + Returns + ------- + list of dict + A list of dictionaries containing the generated outputs. + + Examples + -------- + >>> # Create an instance of EDG4LLM + >>> generator = EDG4LLM(model_provider="chatglm", model_name="chatglm-4-flash", base_url="https://api.example.com", api_key="your_api_key") + + >>> # Generate a dialogue response + >>> response = generator.generate( + task_type="answer", + system_prompt="You are a helpful assistant.", + user_prompt="What is the weather today?", + max_tokens=100 + ) + + >>> print(response) + Output: [{'output': 'The weather today is sunny with a high of 25°C.'}] + + Notes + ----- + The method will use a pipeline's `generate_data` function to create outputs + based on the provided configuration. + """ + + self._tConfig = { + "language": language, + "task_type": task_type, # The type of task for data generation + "system_prompt": system_prompt, # The system-level prompt + "user_prompt": user_prompt, # The user-provided prompt + "do_sample": do_sample, # Whether to use sampling + "temperature": temperature, # Sampling temperature + "top_p": top_p, # Nucleus sampling parameter + "max_tokens": max_tokens, # Maximum tokens in the output + "num_samples": num_samples, # Number of output samples + "output_format": output_format, # Desired output format + "question_path": question_path + } + + # Call the pipeline's generate_data method using the configuration dictionary + data = self.pipeline.generate_data(self._tConfig) + + return data diff --git a/build/lib/edg4llm/core/pipeline.py b/build/lib/edg4llm/core/pipeline.py new file mode 100644 index 0000000..d8ba770 --- /dev/null +++ b/build/lib/edg4llm/core/pipeline.py @@ -0,0 +1,88 @@ +import os +from typing import Any, Tuple, Dict + +from edg4llm.utils.logger import custom_logger +from edg4llm.core.dataGenerators import DataGenerator + +logger = custom_logger("DataPipeline") + +class DataPipeline: + """ + The DataPipeline class manages the entire process of generating data, designed to + automatically create fine-tuning data for different task types such as question + generation, answer generation, and dialogue generation. + + This class uses a DataGenerator object to handle the core logic of data generation + and dynamically executes the corresponding task based on the provided configuration + parameters. It provides a unified interface for users to easily invoke specific + data generation methods with minimal configuration. + + Attributes: + ---------- + data_generator (DataGenerator): An object that handles the specific data generation tasks. + + Methods: + ---------- + __init__(pConfig): Initializes the DataPipeline class and creates a DataGenerator + object based on the configuration. + generate_data(tConfig): Generates fine-tuning data based on the task configuration. + Supported task types include question generation, answer generation, + and dialogue generation. + """ + + def __init__(self, pConfig): + """ + Initializes the data generation process. + + Parameters + ---------- + pConfig : dict + Configuration for initializing the DataGenerator. Expected to contain: + - model_provider: str + The type of language model to use, by default "chatglm". + - model_name: str + The specific model to use within the model type, by default "chatglm-4-flash". + - base_url : str + The base URL of the LLM API. 
+ - api_key : str + The API key for authentication. + """ + + self.data_generator = DataGenerator(pConfig) + + def generate_data(self, tConfig) -> Dict: + """ + Generates data based on the provided configuration. + + Parameters + ---------- + tConfig : Dict + Task configuration containing the following keys: + - task_type : str + Specifies the type of task ('question', 'answer', or 'dialogue'). + - Other parameters required for data generation, specific to the task type. + + Returns + ------- + dict + A dictionary containing the generated fine-tuning data. + + Raises + ------ + ValueError + If the provided task type is unsupported. + """ + if tConfig["task_type"] == "question": + logger.info("Generated data for task_type: 'question'") + data = self.data_generator.generate_question(tConfig) + elif tConfig["task_type"] == "answer": + logger.info("Generated data for task_type: 'answer'") + data = self.data_generator.generate_answer(tConfig) + elif tConfig["task_type"] == "dialogue": + logger.info("Generated data for task_type: 'dialogue'") + data = self.data_generator.generate_dialogue(tConfig) + else: + logger.error("Unsupported task type: %s", tConfig["task_type"]) + raise ValueError("Unsupported task type") + + return data diff --git a/build/lib/edg4llm/generators/__init__.py b/build/lib/edg4llm/generators/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/edg4llm/generators/text_generators/__init__.py b/build/lib/edg4llm/generators/text_generators/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/edg4llm/generators/text_generators/answer_generator.py b/build/lib/edg4llm/generators/text_generators/answer_generator.py new file mode 100644 index 0000000..a66d0c2 --- /dev/null +++ b/build/lib/edg4llm/generators/text_generators/answer_generator.py @@ -0,0 +1,191 @@ +import os +import sys +import json +from typing import Dict, Any + +from edg4llm.utils.logger import custom_logger +from edg4llm.generators.text_generators.base_generator import BaseGenerator + +logger = custom_logger("AnswerGenerator") + +class AnswerGenerator(BaseGenerator): + """ + A class for generating answers based on user queries using a specified model. + + This class extends the `BaseGenerator` class and provides functionality to generate + answers to user queries based on a given configuration. It interacts with the model's + `execute_request` method to generate responses based on system-level and user-level prompts. + It supports customization through parameters such as temperature, sampling strategies, + and token limits. + + Attributes + ---------- + model : object + The model interface used for generating answers. + + Methods + ------- + generate(tConfig: dict) -> list of dict: + Generates answers based on the provided configuration. + + Notes + ----- + - The `generate` method ensures valid answers are returned, retrying if necessary. + - It logs progress for each generated answer. + """ + + def __init__(self, model): + """ + Initialize the AnswerGenerator. + + Parameters + ---------- + model : object + The model interface used for generating answers. + """ + + super().__init__(model) + + def generate(self, tConfig) -> str: + """ + Generate answers based on the provided configuration. + + This method generates one or more answers based on the parameters provided in + the `tConfig` dictionary. 
It uses the model's `execute_request` method to generate + answers based on the system and user prompts, with options to control randomness, + output length, and sampling strategy. + + Parameters + ---------- + tConfig : dict + A configuration dictionary containing the following key-value pairs: + - "system_prompt" : str, optional + A system-level prompt that provides context for generating the answer. Default is an empty string. + - "user_prompt" : str + A user-provided prompt (query) to generate the corresponding answer. + - "model" : str, optional + The specific model to use for answer generation. Default is "glm-4-flash". + - "do_sample" : bool, optional + Whether to use sampling strategies during answer generation. Default is True. + - "temperature" : float, optional + A sampling parameter to control the randomness of the output. Must be between 0.0 and 1.0. Default is 0.95. + - "top_p" : float, optional + Nucleus sampling parameter controlling the cumulative probability range for token selection. + Must be between 0.0 and 1.0. Default is 0.7. + - "max_tokens" : int, optional + The maximum number of tokens to generate in the answer. Default is 4095. + - "num_samples" : int, optional + The number of answers to generate. Default is 1. + + Returns + ------- + list of dict + A list of dictionaries containing the generated answers. Each dictionary + includes the generated answer content and relevant metadata. + + Notes + ----- + - The method will retry generating answers if the model fails to provide a valid response. + - Progress and debug information are logged for each generated answer. + """ + + # Extract configuration parameters + system_prompt = tConfig.get("system_prompt", "") + user_prompt = tConfig.get("user_prompt", "") + do_sample = tConfig.get("do_sample", True) + temperature = tConfig.get("temperature", 0.95) + top_p = tConfig.get("top_p", 0.7) + max_tokens = tConfig.get("max_tokens", 4095) + num_samples = tConfig.get("num_samples", 1) # Default is to generate 1 sample + question_path = tConfig.get("question_path", None) + + try: + with open(question_path, "r", encoding="utf-8") as file: + data = json.load(file) + + if isinstance(data, dict): # If it's a single dictionary, wrap it in a list + data = [data] + elif not isinstance(data, list): # Ensure it's a list of dictionaries + raise ValueError("Invalid JSON structure. Expected a list or a dictionary.") + + # Extract questions + questions = [item["question"] for item in data if "question" in item] + except FileNotFoundError: + logger.error("The file at path %s was not found.", question_path) + return None + except json.JSONDecodeError as e: + logger.error("Error decoding JSON from file %s: %s", question_path, str(e)) + return None + except Exception as e: + logger.error("Unexpected error: %s", str(e)) + return None + + if len(questions) != num_samples: + logger.error( + "The number of questions (%d) does not match the expected number (%d). 
Please check your input.",
+                len(questions),
+                num_samples,
+            )
+
+            sys.exit(1) # A non-zero exit code indicates abnormal termination
+
+        # List to store the generated dialogues
+        dialogues = []
+
+        # Generate dialogues for the specified number of samples
+        total_samples = num_samples # Total number of samples to generate
+        logger.info("Starting the data generation process.")
+        for _idx, question in enumerate(questions):
+            retry_count = 0 # Initialize the retry counter
+            max_retries = 5 # Maximum number of retries (adjust as needed)
+
+            while True: # Keep trying until valid dialogue data is generated
+                retry_count += 1
+
+                generated_answer = self.model.execute_request(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt.replace("EDG4LLM", question),
+                    do_sample=do_sample,
+                    temperature=temperature,
+                    top_p=top_p,
+                    max_tokens=max_tokens,
+                )
+
+                if "error" in generated_answer:
+                    logger.warning(
+                        "Sample %d: Request failed with error: %s. Retrying (%d/%d)...",
+                        _idx + 1,
+                        generated_answer["error"],
+                        retry_count,
+                        max_retries,
+                    )
+
+                    if retry_count >= max_retries:
+                        logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx + 1)
+                        break # Give up on this sample and move on to the next one
+                    continue # Retry generation for the current sample
+
+                # Convert the generated dialogue to the desired format (e.g., Alpaca format)
+                converted_generated_answer = self._convert_original_to_alpaca_answer(system_prompt, question, generated_answer)
+
+                if converted_generated_answer is not None:
+                    # If the dialogue is valid, append it to the results and break the loop
+                    dialogues.append(converted_generated_answer)
+                    break
+                else:
+                    logger.warning(
+                        "Sample %d: Generated answer is None. Retrying (%d/%d)...",
+                        _idx + 1,
+                        retry_count,
+                        max_retries,
+                    )
+
+                    if retry_count >= max_retries:
+                        logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx + 1)
+                        break # Give up on this sample
+
+            # Log the progress of dialogue generation
+            progress = ((_idx+1) / total_samples) * 100
+            logger.info("Data generation progress: %.2f%% (%d/%d samples completed)", progress, _idx+1, total_samples)
+
+        return dialogues
diff --git a/build/lib/edg4llm/generators/text_generators/base_generator.py b/build/lib/edg4llm/generators/text_generators/base_generator.py
new file mode 100644
index 0000000..a857635
--- /dev/null
+++ b/build/lib/edg4llm/generators/text_generators/base_generator.py
@@ -0,0 +1,131 @@
+import os
+from abc import ABC, abstractmethod
+from typing import Dict
+
+from edg4llm.processor.postprocess import PostProcessor
+class BaseGenerator(ABC):
+    """
+    Base class for all data generators, defining a common interface for generating data.
+
+    This class serves as a foundation for different types of data generators, providing common functionality
+    such as interaction with a model and post-processing of generated data. Specific generators should extend
+    this class and implement their own `generate` method.
+
+    Attributes
+    ----------
+    model : object
+        The model interface used for generating data.
+    postprocessor : PostProcessor
+        An instance of the PostProcessor class for handling post-processing of generated data.
+
+    Methods
+    -------
+    generate(prompt: str) -> str
+        Abstract method to generate data based on a prompt. Must be implemented by subclasses.
+
+    """
+    def __init__(self, model):
+        """
+        Initialize the generator.
+
+        Parameters
+        ----------
+        model : object
+            The model interface used for generating data.
+        """
+
+        self.model = model
+        self.postprocessor = PostProcessor()
+
+    @abstractmethod
+    def generate(self, prompt: str) -> str:
+        """
+        Generate data based on the provided prompt.
+
+        Subclasses must implement this method to call the underlying model and
+        return the generated data.
+
+        Parameters
+        ----------
+        prompt : str
+            The prompt used to drive data generation.
+
+        Returns
+        -------
+        str
+            The generated data.
+        """
+        pass
+
+    def _convert_original_to_alpaca(self, system_prompt, single_data):
+        """
+        Convert original data into Alpaca format.
+
+        This method uses the PostProcessor to process conversation data and structure it
+        in a format suitable for Alpaca-based models.
+
+        Parameters
+        ----------
+        system_prompt : str
+            The system-level prompt for context in the Alpaca format.
+        single_data : str
+            The raw conversation data to be processed.
+
+        Returns
+        -------
+        dict
+            The conversation data converted to Alpaca format.
+        """
+
+        converted_data = self.postprocessor.dialogue_postprocessing(conversation_data=single_data, system_prompt=system_prompt)
+
+        return converted_data
+
+    def _convert_original_to_json(self, single_data):
+        """
+        Convert original data into JSON format.
+
+        This method uses the PostProcessor to process raw data into a JSON-compatible structure.
+
+        Parameters
+        ----------
+        single_data : str
+            The raw question data to be processed.
+
+        Returns
+        -------
+        dict
+            The data converted into JSON format.
+        """
+
+        converted_data = self.postprocessor.question_postprocessing(question_data=single_data)
+
+        return converted_data
+
+    def _convert_original_to_alpaca_answer(self, system_prompt, question, single_data):
+        """
+        Convert original data into Alpaca answer format.
+
+        This method uses the PostProcessor to process raw data into an answer format suitable for Alpaca-based models.
+
+        Parameters
+        ----------
+        system_prompt : str
+            The system-level prompt for context in the Alpaca format.
+        question : str
+            The question text for which the answer is generated.
+        single_data : str
+            The raw answer data to be processed.
+
+        Returns
+        -------
+        dict
+            The data converted into Alpaca format.
+        """
+
+        converted_data = self.postprocessor.answer_postprocessing(question=question, answer=single_data, system_prompt=system_prompt)
+
+        return converted_data
+    
\ No newline at end of file
diff --git a/build/lib/edg4llm/generators/text_generators/dialogue_generator.py b/build/lib/edg4llm/generators/text_generators/dialogue_generator.py
new file mode 100644
index 0000000..e1a9e71
--- /dev/null
+++ b/build/lib/edg4llm/generators/text_generators/dialogue_generator.py
@@ -0,0 +1,159 @@
+import os
+from typing import Dict, List, Any
+
+from edg4llm.utils.logger import custom_logger
+from edg4llm.generators.text_generators.base_generator import BaseGenerator
+
+logger = custom_logger("DialogueGenerator")
+
+class DialogueGenerator(BaseGenerator):
+    """
+    Dialogue Generator class for generating dialogues using a specified model.
+
+    This class extends the `BaseGenerator` and utilizes the given model to generate dialogues
+    based on user input and system prompts. It provides flexibility to control generation parameters
+    like sampling strategies, temperature, and output format.
+
+    Parameters
+    ----------
+    model : object
+        The model interface used for generating dialogues. This model must have the
+        `execute_request` method for generating dialogue based on the given parameters.
+    """
+
+    def __init__(self, model):
+        """
+        Initialize the Dialogue Generator.
+ + This constructor initializes the `DialogueGenerator` by calling the base class constructor + with the provided model. It sets up the necessary components for generating dialogues. + + Parameters + ---------- + model : object + The model interface to be used for generating dialogues. It should provide + the `execute_request` method to generate data based on the parameters. + + Notes + ----- + The `model` should be capable of handling inputs like system prompts, user prompts, + and additional parameters for controlling the text generation process. + """ + super().__init__(model) + + def generate(self, tConfig) -> List: + """ + Generate dialogues based on the provided configuration. + + This method generates one or more dialogues based on the parameters provided in + the `tConfig` dictionary. The method interacts with the model's `execute_request` + function to generate dialogue based on the system and user prompts. It also supports + various options for controlling randomness, output length, and sampling strategy. + + Parameters + ---------- + tConfig : dict + A configuration dictionary containing the following key-value pairs: + - "system_prompt" : str, optional + A system-level prompt that guides the dialogue generation. Default is an empty string. + - "user_prompt" : str, optional + A user-provided prompt to initiate the dialogue generation. Default is an empty string. + - "model" : str, optional + The specific model to use for generation. Default is "glm-4-flash". + - "do_sample" : bool, optional + Whether to use sampling strategies during text generation. Default is True. + - "temperature" : float, optional + A sampling parameter to control the randomness of output. Must be between 0.0 and 1.0. Default is 0.95. + - "top_p" : float, optional + Nucleus sampling parameter controlling the cumulative probability range for token selection. + Must be between 0.0 and 1.0. Default is 0.7. + - "max_tokens" : int, optional + The maximum number of tokens to generate. Default is 4095. + - "num_samples" : int, optional + The number of dialogue samples to generate. Default is 1. + + Returns + ------- + list of dict + A list of dictionaries containing the generated dialogues. Each dictionary + includes the generated dialogue content. + + Notes + ----- + - The method will attempt to generate dialogues until a valid response is generated. + If the generated dialogue is `None`, it will retry. + - Progress is logged for each sample generated. 
+ """ + + # Extract configuration parameters + system_prompt = tConfig.get("system_prompt", "") + user_prompt = tConfig.get("user_prompt", "") + do_sample = tConfig.get("do_sample", True) + temperature = tConfig.get("temperature", 0.95) + top_p = tConfig.get("top_p", 0.7) + max_tokens = tConfig.get("max_tokens", 4095) + num_samples = tConfig.get("num_samples", 1) # Default is to generate 1 sample + + # List to store the generated dialogues + dialogues = [] + + # Generate dialogues for the specified number of samples + total_samples = num_samples # Total number of samples to generate + logger.info("Starting the data generation process.") + for _idx in range(1, num_samples + 1): + retry_count = 0 # 初始化重试计数 + max_retries = 5 # 设置最大重试次数(根据需要调整) + + while True: # Keep trying until valid dialogue data is generated + retry_count += 1 + + generated_dialogue = self.model.execute_request( + system_prompt=system_prompt, + user_prompt=user_prompt, + do_sample=do_sample, + temperature=temperature, + top_p=top_p, + max_tokens=max_tokens, + ) + + if "error" in generated_dialogue: + logger.warning( + "Sample %d: Request failed with error: %s. Retrying (%d/%d)...", + _idx, + generated_dialogue["error"], + retry_count, + max_retries, + ) + + if retry_count >= max_retries: + logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx) + break # 跳出当前样本,进入下一个 + + continue # 继续当前样本的生成 + + + # Convert the generated dialogue to the desired format (e.g., Alpaca format) + converted_generated_dialogue = self._convert_original_to_alpaca(system_prompt, generated_dialogue) + + if converted_generated_dialogue is not None: + # If the dialogue is valid, append it to the results and break the loop + dialogues.append(converted_generated_dialogue) + break + else: + logger.warning( + "Sample %d: Generated dialogue is None. Retrying (%d/%d)...", + _idx, + retry_count, + max_retries, + ) + + if retry_count >= max_retries: + logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx) + break # 跳出当前样本 + + + # Log the progress of dialogue generation + progress = (_idx / total_samples) * 100 + logger.info("Data generation progress: %.2f%% (%d/%d samples completed)", progress, _idx, total_samples) + + return dialogues diff --git a/build/lib/edg4llm/generators/text_generators/question_generator.py b/build/lib/edg4llm/generators/text_generators/question_generator.py new file mode 100644 index 0000000..3a4d99e --- /dev/null +++ b/build/lib/edg4llm/generators/text_generators/question_generator.py @@ -0,0 +1,151 @@ +import os +from typing import Dict, List, Any +from edg4llm.utils.logger import custom_logger +from edg4llm.generators.text_generators.base_generator import BaseGenerator + +logger = custom_logger("QuestionGenerator") + +class QuestionGenerator(BaseGenerator): + """ + A class for generating questions based on user prompts and configuration. + + This class extends the `BaseGenerator` class and provides functionality to generate + questions using a specified model. It interacts with the model's `execute_request` + method to create output based on user-defined parameters such as sampling strategies, + temperature, and maximum tokens. + + Attributes + ---------- + model : object + The model interface used for generating questions. + + Methods + ------- + generate(tConfig: dict) -> list of dict: + Generates questions based on the provided configuration. + + Notes + ----- + - The `generate` method ensures valid responses are returned, retrying if necessary. 
+    - Logs progress for each generated question.
+    """
+
+    def __init__(self, model):
+        """
+        Initialize the QuestionGenerator.
+
+        Parameters
+        ----------
+        model : object
+            The model interface used for generating questions.
+        """
+
+        super().__init__(model)
+
+    def generate(self, tConfig: Dict) -> List:
+        """
+        Generate questions based on the provided configuration.
+
+        This method generates one or more questions using the parameters specified
+        in the `tConfig` dictionary. It interacts with the model's `execute_request`
+        method to generate output based on user prompts and various sampling options.
+
+        Parameters
+        ----------
+        tConfig : dict
+            A dictionary containing configuration options for question generation:
+            - "system_prompt" : str, optional
+                A system-level instruction to guide the question generation. Default is an empty string.
+            - "user_prompt" : str, optional
+                A user-provided input to guide the question generation. Default is an empty string.
+            - "model" : str, optional
+                Specifies the model for text generation. Default is "glm-4-flash".
+            - "do_sample" : bool, optional
+                Whether to use sampling during generation. Default is True.
+            - "temperature" : float, optional
+                Controls randomness in output. Value should be between 0.0 and 1.0. Default is 0.95.
+            - "top_p" : float, optional
+                Nucleus sampling parameter to limit token selection to a cumulative probability. Default is 0.7.
+            - "max_tokens" : int, optional
+                The maximum number of tokens for the output. Default is 4095.
+            - "num_samples" : int, optional
+                The number of question samples to generate. Default is 1.
+
+        Returns
+        -------
+        list of dict
+            A list of dictionaries containing the generated questions.
+
+        Notes
+        -----
+        - The method retries generation until a valid response is obtained.
+        - Logs progress for each generated sample.
+        """
+
+        # Extract parameters from the configuration
+        system_prompt = tConfig.get("system_prompt", "")
+        user_prompt = tConfig.get("user_prompt", "")
+        do_sample = tConfig.get("do_sample", True)
+        temperature = tConfig.get("temperature", 0.95)
+        top_p = tConfig.get("top_p", 0.7)
+        max_tokens = tConfig.get("max_tokens", 4095)
+        num_samples = tConfig.get("num_samples", 1)
+
+        # Initialize a list to store generated questions
+        questions = []
+        cur_len = 0
+        # Generate questions for the specified number of samples
+        logger.info("Starting the data generation process.")
+        for _idx in range(1, num_samples + 1):
+            retry_count = 0 # Initialize the retry counter
+            max_retries = 5 # Maximum number of retries (adjust as needed)
+
+            while True: # Retry until a valid question is generated
+                retry_count += 1
+
+                generated_question = self.model.execute_request(
+                    system_prompt=system_prompt,
+                    user_prompt=user_prompt,
+                    do_sample=do_sample,
+                    temperature=temperature,
+                    top_p=top_p,
+                    max_tokens=max_tokens,
+                )
+
+                if "error" in generated_question:
+                    logger.warning(
+                        "Sample %d: Request failed with error: %s. Retrying (%d/%d)...",
+                        _idx,
+                        generated_question["error"],
+                        retry_count,
+                        max_retries,
+                    )
+
+                    if (retry_count >= max_retries):
+                        logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx)
+                        break # Give up on this sample
+
+                # Convert the raw output to a specific format
+                converted_question = self._convert_original_to_json(generated_question)
+
+                if converted_question is not None:
+                    cur_len = len(converted_question)
+                    questions.extend(converted_question)
+                    break
+                else:
+                    logger.warning(
+                        "Sample %d: Generated question is None. 
Retrying (%d/%d)...", + _idx, + retry_count, + max_retries, + ) + + if retry_count >= max_retries: + logger.error("Sample %d: Max retries reached. Skipping this sample.", _idx) + break # 跳出当前样本 + + # Log progress for tracking generation completion + progress = (_idx / num_samples) * 100 + logger.info("Generation progress: %.2f%% (%d samples generated, %d/%d epoch completed)", progress, cur_len, _idx, num_samples) + + return questions diff --git a/build/lib/edg4llm/models/__init__.py b/build/lib/edg4llm/models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/edg4llm/models/baseModel.py b/build/lib/edg4llm/models/baseModel.py new file mode 100644 index 0000000..d3c62b8 --- /dev/null +++ b/build/lib/edg4llm/models/baseModel.py @@ -0,0 +1,126 @@ +""" +Module for defining the base class of EDG models. + +This file contains the abstract base class `EDGBaseModel`, which serves as a foundation for implementing various +machine learning models. The class defines key methods that must be implemented by any derived model class +to handle requests, send HTTP requests, and interact with APIs. + +Classes +------- +EDGBaseModel(ABC) + Abstract base class for EDG models, providing a standard structure for derived model implementations. + +Methods +------- +__init__(api_key: str = None, base_url: str = None, model_name: str = None) + Initializes the base model with API key, base URL, and model name. + +execute_request(system_prompt: str, user_prompt: str, **kwargs) -> str + Abstract method to process user input and generate model responses. + Must be implemented by derived classes. + +send_request(request: Dict[str, Any]) -> Dict[str, Any] + Abstract method to send HTTP requests and handle server interactions. + Must be implemented by derived classes. +""" + +import requests +from abc import ABC, abstractmethod +from typing import Any, Dict + +from edg4llm.utils.logger import custom_logger + +logger = custom_logger('baseModel') + + +class EDGBaseModel(ABC): + """ + Abstract base class for EDG models. + + This class defines the blueprint for machine learning model implementations. Derived classes must + implement methods to process user prompts, interact with APIs, and handle HTTP requests. + + Attributes + ---------- + api_key : str + The API key required for authenticating requests. + + base_url : str + The base URL of the model API endpoint. + + model_name : str + The name of the model, used to differentiate between various models. + """ + + def __init__(self, api_key: str = None, base_url: str = None, model_name: str = None): + """ + Initializes the base model with API key, base URL, and model name. + + Parameters + ---------- + api_key : str, optional + The API key for authenticating requests. Default is None. + + base_url : str, optional + The base URL of the model API endpoint. Default is None. + + model_name : str, optional + The name of the model, used for identifying different models. Default is None. + """ + self.api_key = api_key + self.base_url = base_url + self.model_name = model_name + + @abstractmethod + def execute_request(self, system_prompt: str, user_prompt: str, **kwargs) -> str: + """ + Abstract method to process and execute a request. + + This method must be implemented by derived classes. It processes user input and generates + responses based on a system prompt and additional parameters. + + Parameters + ---------- + system_prompt : str + The system-level instruction or prompt defining the role or behavior of the model. 
+ + user_prompt : str + The user's input or query for the model. + + kwargs : dict + Additional parameters for processing the request. + + Returns + ------- + str + The response generated by the model. + + Notes + ----- + - Derived classes should implement this method to handle the specific logic for generating responses. + """ + pass + + @abstractmethod + def send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Abstract method to send HTTP requests. + + This method must be implemented by derived classes to handle API interactions and perform + error handling for HTTP requests. + + Parameters + ---------- + request : dict + A dictionary containing all necessary information for the HTTP request. + + Returns + ------- + dict + The server's response as a dictionary. + + Notes + ----- + - Derived classes should implement this method to handle API-specific logic and error handling. + """ + pass diff --git a/build/lib/edg4llm/models/chatglm.py b/build/lib/edg4llm/models/chatglm.py new file mode 100644 index 0000000..5c99629 --- /dev/null +++ b/build/lib/edg4llm/models/chatglm.py @@ -0,0 +1,273 @@ +import os +import requests +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast + +from edg4llm.utils.logger import custom_logger +from edg4llm.models.baseModel import EDGBaseModel +from edg4llm.utils.exceptions import HttpClientError, InvalidPromptError + +logger = custom_logger('chatglm') + +class EDGChatGLM(EDGBaseModel): + """ + EDGChatGLM interface for interacting with the ChatGLM model to generate text based on given prompts. + + This class provides an interface to interact with the ChatGLM model for generating text + based on a system and user prompt. It supports customizable parameters such as temperature, + sampling strategies, and model selection. It also handles HTTP requests and error management. + + Parameters + ---------- + base_url : str, optional + The base URL for the ChatGLM API. If not provided, defaults to None. + api_key : str, optional + The API key for authenticating with the ChatGLM API. If not provided, defaults to None. + """ + + def __init__(self, base_url: str = None, api_key: str = None, model_name: str = 'glm-4-flash'): + """ + Initialize the ChatGLM model interface. + + This constructor initializes the `EDGChatGLM` class by calling the base class constructor + and passing the API key, base URL, and model name ("ChatGLM"). It sets up the necessary + configuration for interacting with the ChatGLM API. + + Parameters + ---------- + base_url : str, optional + The base URL for the ChatGLM API. Default is None. + api_key : str, optional + The API key for authenticating with the ChatGLM API. Default is None. + model_name: str, optional + The specific model to use within the selected provider. Default is "glm-4-flash". + Notes + ----- + The base URL and API key are required for successful communication with the ChatGLM API. + """ + super().__init__(api_key, base_url, model_name=model_name) + + def execute_request( + self, + system_prompt: str = None, + user_prompt: str = None, + do_sample: bool = True, + temperature: float = 0.95, + top_p: float = 0.7, + max_tokens: int = 4095 + ) -> str: + """ + Generate text using the ChatGLM model based on the provided prompts and parameters. + + This method calls the internal request execution function and handles the text + generation process using the specified system and user prompts. 
It allows controlling + text generation via parameters such as temperature, sampling strategy, and token limits. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt that sets the context for the conversation. Default is None. + user_prompt : str, optional + The user-provided prompt that initiates the conversation. Default is None. + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter for controlling randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate in the output. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both the system and user prompts are None. + """ + response = self._execute_request(system_prompt, user_prompt, self.model_name, do_sample, temperature, top_p, max_tokens) + return response + + def send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Send an HTTP request to the ChatGLM API. + + This method sends a POST request to the ChatGLM API with the provided request data. + It returns the response data as a dictionary. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The response from the API in the form of a dictionary. + + Raises + ------ + HttpClientError + If any error occurs during the HTTP request process. + """ + response = self._send_request(request=request) + return response + + def _send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Internal method to send a POST request to the ChatGLM API. + + This method handles the actual HTTP POST request to the ChatGLM API. It includes + error handling for HTTP errors, connection issues, timeouts, and JSON decoding. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The JSON response from the API. + + Raises + ------ + HttpClientError + If an error occurs during the request. + """ + url = request.get("url", "https://open.bigmodel.cn/api/paas/v4/chat/completions") + headers = {**request.get("headers", {})} + json = request.get("json", {}) + try: + response = requests.post( + url=url, + headers=headers, + json=json, + timeout=30, + ) + response.raise_for_status() + return response.json()["choices"][0]["message"]["content"].strip() + + except requests.exceptions.HTTPError as e: + # Handle HTTP error exceptions + status_code = e.response.status_code + logger.error( + "HTTP error occurred. 
Status Code: %s, URL: %s, Message: %s", + status_code, + url, + e, + ) + + return {"error": "HTTP error", "status_code": status_code, "message": str(e)} + + + except requests.exceptions.ConnectionError as e: + # Handle connection errors + logger.error("Connection error occurred while connecting to %s: %s", url, e) + + return {"error": "Connection error", "message": str(e)} + + except requests.exceptions.Timeout as e: + # Handle timeout errors + logger.error("Timeout occurred while sending request to %s: %s", url, e) + + return {"error": "Timeout", "message": str(e)} + + + except requests.exceptions.RequestException as e: + # Handle any generic request exceptions + logger.error( + "Request exception occurred while sending request to %s: %s", url, e + ) + + return {"error": "Request exception", "message": str(e)} + + + except ValueError as e: + # Handle JSON decoding errors + logger.error("JSON decoding error occurred: %s", e) + + return {"error": "JSON decoding error", "message": str(e)} + + except Exception as e: + # Catch any unexpected errors + logger.critical( + "An unexpected error occurred while sending request to %s: %s", url, e + ) + + return {"error": "Unexpected error", "message": str(e)} + + def _execute_request( + self, + system_prompt: str = None, + user_prompt: str = None, + model: str = "glm-4-flash", + do_sample: bool = True, + temperature: float = 0.95, + top_p: float = 0.7, + max_tokens: int = 4095 + ) -> str: + """ + Internal method to prepare the request data and execute the request for text generation. + + This method prepares the necessary data (including headers, JSON body) for the + ChatGLM API request and then calls the `send_request` method to send the request + and return the response. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt that provides context for the dialogue generation. + Default is None. + user_prompt : str, optional + The user-provided prompt that initiates the generation. + Default is None. + model : str, optional + The model to use for the generation. Default is "glm-4-flash". + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter for controlling randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both the system and user prompts are None. 
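+
+        Examples
+        --------
+        A minimal, illustrative usage sketch (the API key below is a placeholder,
+        and a real call requires network access to the ChatGLM endpoint):
+
+        >>> chatglm = EDGChatGLM(
+        ...     base_url="https://open.bigmodel.cn/api/paas/v4/chat/completions",
+        ...     api_key="YOUR_API_KEY",
+        ... )
+        >>> reply = chatglm.execute_request(
+        ...     system_prompt="You are a helpful assistant.",
+        ...     user_prompt="Introduce yourself in one sentence.",
+        ... )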
+ """ + if (system_prompt is None and user_prompt is None): + logger.error("Both prompts cannot be empty") + raise InvalidPromptError("Both prompts cannot be empty") + + request_data = { + "url": f"{self.base_url}", + "headers": { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + }, + "json": { + "model": model, + "messages": [ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": user_prompt}, + ], + "do_sample": do_sample, + "temperature": temperature, + "top_p": top_p, + "max_tokens": max_tokens, + }, + } + + response = self.send_request(request_data) + + return response diff --git a/build/lib/edg4llm/models/chatgpt.py b/build/lib/edg4llm/models/chatgpt.py new file mode 100644 index 0000000..6b7ad18 --- /dev/null +++ b/build/lib/edg4llm/models/chatgpt.py @@ -0,0 +1,286 @@ +import os +import requests +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast + +from edg4llm.utils.logger import custom_logger +from edg4llm.models.baseModel import EDGBaseModel +from edg4llm.utils.exceptions import HttpClientError, InvalidPromptError + +logger = custom_logger('chatgpt') + +class EDGChatGPT(EDGBaseModel): + """ + A class to interface with the ChatGPT model for text generation. + + This class extends the `EDGBaseModel` abstract base class to implement a specific interface + for interacting with the ChatGPT API. It supports text generation using system-level and + user-level prompts with customizable parameters such as temperature, sampling strategies, + and token limits. The class also includes methods to handle HTTP requests and manage errors. + + Attributes + ---------- + base_url : str + The base URL for the ChatGPT API endpoint. + api_key : str + The API key for authenticating with the ChatGPT API. + model_name : str + The specific model to use, defaulting to "gpt-4o-mini". + + Methods + ------- + execute_request(system_prompt: str, user_prompt: str, do_sample: bool, temperature: float, top_p: float, max_tokens: int) -> str: + Generates text using the ChatGPT model based on the provided prompts and parameters. + + send_request(request: Dict[str, Any]) -> Dict[str, Any]: + Sends an HTTP POST request to the ChatGPT API and returns the response as a dictionary. + + Notes + ----- + - The `base_url` and `api_key` are required for proper communication with the ChatGPT API. + - Provides detailed error handling for HTTP, connection, timeout, and JSON decoding issues. + - Supports customizable text generation parameters for flexibility in model behavior. + """ + + def __init__(self, base_url:str = None, api_key: str = None, model_name: str = "gpt-4o-mini"): + """ + Initialize the ChatGPT model interface. + + Parameters + ---------- + base_url : str, optional + The base URL for the ChatGPT API. Default is None. + api_key : str, optional + The API key for authenticating with the ChatGPT API. Default is None. + model_name : str, optional + The specific model to use, defaulting to "gpt-4o-mini". + """ + + super().__init__(api_key, base_url, model_name=model_name) + + def execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + ) -> str: + + """ + Generate text using the ChatGPT model based on the provided prompts and parameters. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. 
+ user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. + """ + + response = self._execute_request(system_prompt, user_prompt, self.model_name, do_sample, temperature, top_p, max_tokens) + return response + + def send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + + """ + Send an HTTP request to the ChatGPT API. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The response from the API in the form of a dictionary. + + Raises + ------ + HttpClientError + If any error occurs during the HTTP request process. + """ + + response = self._send_request(request=request) + return response + + def _send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + + """ + Internal method to send an HTTP POST request to the ChatGPT API. + + This method handles the actual HTTP POST request and manages error handling + for issues like connection failures, timeouts, and JSON decoding errors. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The JSON response from the API. + + Raises + ------ + HttpClientError + If an error occurs during the HTTP request. + """ + + url = request.get("url", "https://api.openai.com/v1/chat/completions") + headers = {**request.get("headers", {})} + json = request.get("json", {}) + try: + response = requests.post( + url=url, + headers=headers, + json=json, + timeout=30, + ) + + response.raise_for_status() + + return response.json()["choices"][0]["message"]["content"].strip() + + except requests.exceptions.HTTPError as e: + # Handle HTTP error exceptions + status_code = e.response.status_code + logger.error( + "HTTP error occurred. 
Status Code: %s, URL: %s, Message: %s", + status_code, + url, + e, + ) + + return {"error": "HTTP error", "status_code": status_code, "message": str(e)} + + + except requests.exceptions.ConnectionError as e: + # Handle connection errors + logger.error("Connection error occurred while connecting to %s: %s", url, e) + + return {"error": "Connection error", "message": str(e)} + + except requests.exceptions.Timeout as e: + # Handle timeout errors + logger.error("Timeout occurred while sending request to %s: %s", url, e) + + return {"error": "Timeout", "message": str(e)} + + + except requests.exceptions.RequestException as e: + # Handle any generic request exceptions + logger.error( + "Request exception occurred while sending request to %s: %s", url, e + ) + + return {"error": "Request exception", "message": str(e)} + + + except ValueError as e: + # Handle JSON decoding errors + logger.error("JSON decoding error occurred: %s", e) + + return {"error": "JSON decoding error", "message": str(e)} + + except Exception as e: + # Catch any unexpected errors + logger.critical( + "An unexpected error occurred while sending request to %s: %s", url, e + ) + + return {"error": "Unexpected error", "message": str(e)} + + + def _execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , model: str = "gpt-4o-mini" + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + ) -> str: + + """ + Internal method to prepare and execute the API request for text generation. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. + user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + model : str, optional + The specific model to use for text generation. Default is "gpt-4o-mini". + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. 
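+
+        Examples
+        --------
+        A minimal, illustrative usage sketch (the API key below is a placeholder;
+        the generation parameters are shown only to illustrate the interface):
+
+        >>> chatgpt = EDGChatGPT(
+        ...     base_url="https://api.openai.com/v1/chat/completions",
+        ...     api_key="YOUR_API_KEY",
+        ... )
+        >>> reply = chatgpt.execute_request(
+        ...     system_prompt="You are a helpful assistant.",
+        ...     user_prompt="Write one sentence about synthetic data generation.",
+        ...     temperature=0.7,
+        ...     max_tokens=256,
+        ... )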
+ """ + + if (system_prompt is None and user_prompt is None): + logger.error("prompt不能同时为空") + raise InvalidPromptError("prompt不能同时为空") + + request_data = { + "url": f"{self.base_url}", + "headers": { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + }, + "json": { + "model": model, + "messages": [ + { + "role": "developer", + "content": system_prompt, + }, + { + "role": "user", + "content": user_prompt, + } + ], + "temperature": temperature, + "top_p": top_p, + "max_tokens": max_tokens + }, + } + + response = self.send_request(request_data) + return response diff --git a/build/lib/edg4llm/models/deepseek.py b/build/lib/edg4llm/models/deepseek.py new file mode 100644 index 0000000..edd6990 --- /dev/null +++ b/build/lib/edg4llm/models/deepseek.py @@ -0,0 +1,294 @@ +import os +import json +import requests +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast + +from edg4llm.utils.logger import custom_logger +from edg4llm.models.baseModel import EDGBaseModel +from edg4llm.utils.exceptions import HttpClientError, InvalidPromptError + +logger = custom_logger('deepseek') + +class EDGDeepSeek(EDGBaseModel): + """ + A class to interface with the DeepSeek model for text generation. + + This class extends the `EDGBaseModel` abstract base class to implement a specific interface + for interacting with the DeepSeek API. It allows generating text based on system-level and + user-level prompts, with customizable parameters such as temperature, sampling strategies, + and token limits. The class includes methods to handle HTTP requests and manage errors + specific to the DeepSeek API. + + Attributes + ---------- + base_url : str + The base URL for the DeepSeek API endpoint. + api_key : str + The API key for authenticating with the DeepSeek API. + model_name : str + The specific model to use, defaulting to "deepseek-chat". + + Methods + ------- + execute_request(system_prompt: str, user_prompt: str, do_sample: bool, temperature: float, top_p: float, max_tokens: int) -> str: + Generates text using the DeepSeek model based on the provided prompts and parameters. + + send_request(request: Dict[str, Any]) -> Dict[str, Any]: + Sends an HTTP POST request to the DeepSeek API and returns the response as a dictionary. + + Notes + ----- + - The `base_url` and `api_key` are required for proper communication with the DeepSeek API. + - Provides detailed error handling for HTTP, connection, timeout, and JSON decoding issues. + - Supports customizable text generation parameters for flexibility in model behavior. + """ + + def __init__(self, base_url:str = None, api_key: str = None, model_name: str = "deepseek-chat"): + """ + Initialize the DeepSeek model interface. + + Parameters + ---------- + base_url : str, optional + The base URL for the DeepSeek API. Default is None. + api_key : str, optional + The API key for authenticating with the DeepSeek API. Default is None. + model_name : str, optional + The specific model to use, defaulting to "deepseek-chat". + """ + + super().__init__(api_key=api_key, base_url=base_url, model_name = model_name) + + def execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + ) -> str: + """ + Generate text using the DeepSeek model based on the provided prompts and parameters. 
+ + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. + user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. + """ + + response = self._execute_request(system_prompt, user_prompt, self.model_name, do_sample, temperature, top_p, max_tokens) + return response + + def send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Send an HTTP request to the DeepSeek API. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The response from the API in the form of a dictionary. + + Raises + ------ + HttpClientError + If any error occurs during the HTTP request process. + """ + + response = self._send_request(request=request) + return response + + def _send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Internal method to send an HTTP POST request to the DeepSeek API. + + This method handles the actual HTTP POST request and manages error handling + for issues like connection failures, timeouts, and JSON decoding errors. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The JSON response from the API. + + Raises + ------ + HttpClientError + If an error occurs during the HTTP request. + """ + + url = request.get("url", "https://api.deepseek.com/chat/completions") + headers = {**request.get("headers", {})} + data = request.get("data", {}) + + if isinstance(data, dict): + data = json.dumps(data) + + try: + response = requests.request( + "POST", + url=url, + headers=headers, + data=data, + # timeout=30, + ) + + response.raise_for_status() + return response.json()["choices"][0]["message"]["content"].strip() + + except requests.exceptions.HTTPError as e: + # Handle HTTP error exceptions + status_code = e.response.status_code + logger.error( + "HTTP error occurred. 
Status Code: %s, URL: %s, Message: %s", + status_code, + url, + e, + ) + + return {"error": "HTTP error", "status_code": status_code, "message": str(e)} + + + except requests.exceptions.ConnectionError as e: + # Handle connection errors + logger.error("Connection error occurred while connecting to %s: %s", url, e) + + return {"error": "Connection error", "message": str(e)} + + except requests.exceptions.Timeout as e: + # Handle timeout errors + logger.error("Timeout occurred while sending request to %s: %s", url, e) + + return {"error": "Timeout", "message": str(e)} + + + except requests.exceptions.RequestException as e: + # Handle any generic request exceptions + logger.error( + "Request exception occurred while sending request to %s: %s", url, e + ) + + return {"error": "Request exception", "message": str(e)} + + + except ValueError as e: + # Handle JSON decoding errors + logger.error("JSON decoding error occurred: %s", e) + + return {"error": "JSON decoding error", "message": str(e)} + + except Exception as e: + # Catch any unexpected errors + logger.critical( + "An unexpected error occurred while sending request to %s: %s", url, e + ) + + return {"error": "Unexpected error", "message": str(e)} + + def _execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , model: str = "deepseek-chat" + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 2047 + ) -> str: + + """ + Internal method to prepare and execute the API request for text generation. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. + user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + model : str, optional + The specific model to use for text generation. Default is "deepseek-chat". + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 2047. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. 
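+
+        Notes
+        -----
+        Unlike the other model interfaces, the request body is assembled under a
+        "data" key and serialized with ``json.dumps`` in ``_send_request`` before
+        being POSTed to the DeepSeek endpoint.
+
+        Examples
+        --------
+        A minimal, illustrative usage sketch (the API key below is a placeholder):
+
+        >>> deepseek = EDGDeepSeek(
+        ...     base_url="https://api.deepseek.com/chat/completions",
+        ...     api_key="YOUR_API_KEY",
+        ... )
+        >>> reply = deepseek.execute_request(
+        ...     system_prompt="You are a helpful assistant.",
+        ...     user_prompt="Give one example of a fine-tuning data format.",
+        ... )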
+ """ + + if (system_prompt is None and user_prompt is None): + logger.error("prompt不能同时为空") + raise InvalidPromptError("prompt不能同时为空") + + request_data = { + "url": self.base_url, + "data": { + "messages": [ + {"content": system_prompt, "role": "system"}, + {"content": user_prompt, "role": "user"} + ], + "model": model, + "frequency_penalty": 0, + "max_tokens": max_tokens, + "presence_penalty": 0, + "response_format": {"type": "text"}, + "stop": None, + "stream": False, + "stream_options": None, + "temperature": temperature, + "top_p": top_p, + "tools": None, + "tool_choice": "none", + "logprobs": False, + "top_logprobs": None + }, + "headers": { + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'Authorization': f'Bearer {self.api_key}' + } + } + + response = self._send_request(request_data) + + return response diff --git a/build/lib/edg4llm/models/internlm.py b/build/lib/edg4llm/models/internlm.py new file mode 100644 index 0000000..6f51807 --- /dev/null +++ b/build/lib/edg4llm/models/internlm.py @@ -0,0 +1,281 @@ +import os +import requests +from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional, Union, cast + +from edg4llm.utils.logger import custom_logger +from edg4llm.models.baseModel import EDGBaseModel +from edg4llm.utils.exceptions import HttpClientError, InvalidPromptError + +logger = custom_logger('internlm') + +class EDGInternLM(EDGBaseModel): + """ + A class to interface with the InternLM model for text generation. + + This class extends the `EDGBaseModel` abstract base class to implement a specific interface + for interacting with the InternLM API. It allows generating text based on system-level and + user-level prompts, with customizable parameters such as temperature, sampling strategies, + and token limits. The class includes methods to handle HTTP requests and manage errors + specific to the InternLM API. + + Attributes + ---------- + base_url : str + The base URL for the InternLM API endpoint. + api_key : str + The API key for authenticating with the InternLM API. + model_name : str + The specific model to use, defaulting to "internlm2.5-latest". + + Methods + ------- + execute_request(system_prompt: str, user_prompt: str, model: str, do_sample: bool, temperature: float, top_p: float, max_tokens: int) -> str: + Generates text using the InternLM model based on the provided prompts and parameters. + + send_request(request: Dict[str, Any]) -> Dict[str, Any]: + Sends an HTTP POST request to the InternLM API and returns the response as a dictionary. + + Notes + ----- + - The `base_url` and `api_key` are required for proper communication with the InternLM API. + - Provides detailed error handling for HTTP, connection, timeout, and JSON decoding issues. + - Supports customizable text generation parameters for flexibility in model behavior. + """ + + def __init__(self, base_url:str = None, api_key: str = None, model_name: str = "internlm2.5-latest"): + """ + Initialize the InternLM model interface. + + Parameters + ---------- + base_url : str, optional + The base URL for the InternLM API. Default is None. + api_key : str, optional + The API key for authenticating with the InternLM API. Default is None. + model_name : str, optional + The specific model to use, defaulting to "internlm2.5-latest". 
+ """ + super().__init__(api_key, base_url, model_name=model_name) + + def execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , model: str = "internlm2.5-latest" + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + ) -> str: + """ + Generate text using the InternLM model based on the provided prompts and parameters. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. + user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + model : str, optional + The specific model to use for text generation, defaulting to "internlm2.5-latest". + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. + """ + + response = self._execute_request(system_prompt, user_prompt, model, do_sample, temperature, top_p, max_tokens) + return response + + def send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Send an HTTP request to the InternLM API. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The response from the API in the form of a dictionary. + + Raises + ------ + HttpClientError + If any error occurs during the HTTP request process. + """ + + response = self._send_request(request=request) + return response + + def _send_request(self, request: Dict[str, Any]) -> Dict[str, Any]: + """ + Internal method to send an HTTP POST request to the InternLM API. + + This method handles the actual HTTP POST request and manages error handling + for issues like connection failures, timeouts, and JSON decoding errors. + + Parameters + ---------- + request : dict + A dictionary containing the request data, including the URL, headers, and JSON body. + + Returns + ------- + dict + The JSON response from the API. + + Raises + ------ + HttpClientError + If an error occurs during the HTTP request. + """ + + url = request.get("url", "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions") + headers = {**request.get("headers", {})} + json = request.get("json", {}) + try: + response = requests.post( + url=url, + headers=headers, + json=json, + timeout=30, + ) + + response.raise_for_status() + return response.json()["choices"][0]["message"]["content"].strip() + + except requests.exceptions.HTTPError as e: + # Handle HTTP error exceptions + status_code = e.response.status_code + logger.error( + "HTTP error occurred. 
Status Code: %s, URL: %s, Message: %s", + status_code, + url, + e, + ) + + return {"error": "HTTP error", "status_code": status_code, "message": str(e)} + + + except requests.exceptions.ConnectionError as e: + # Handle connection errors + logger.error("Connection error occurred while connecting to %s: %s", url, e) + + return {"error": "Connection error", "message": str(e)} + + except requests.exceptions.Timeout as e: + # Handle timeout errors + logger.error("Timeout occurred while sending request to %s: %s", url, e) + + return {"error": "Timeout", "message": str(e)} + + except requests.exceptions.RequestException as e: + # Handle any generic request exceptions + logger.error( + "Request exception occurred while sending request to %s: %s", url, e + ) + + return {"error": "Request exception", "message": str(e)} + + except ValueError as e: + # Handle JSON decoding errors + logger.error("JSON decoding error occurred: %s", e) + + return {"error": "JSON decoding error", "message": str(e)} + + except Exception as e: + # Catch any unexpected errors + logger.critical( + "An unexpected error occurred while sending request to %s: %s", url, e + ) + + return {"error": "Unexpected error", "message": str(e)} + + def _execute_request( + self + , system_prompt: str = None + , user_prompt: str = None + , model: str = "glm-4-flash" + , do_sample: bool = True + , temperature: float = 0.95 + , top_p: float = 0.7 + , max_tokens: int = 4095 + ) -> str: + """ + Internal method to prepare and execute the API request for text generation. + + Parameters + ---------- + system_prompt : str, optional + The system-level prompt providing context for the text generation. Default is None. + user_prompt : str, optional + The user-provided prompt initiating the text generation. Default is None. + model : str, optional + The specific model to use for text generation. Default is "internlm2.5-latest". + do_sample : bool, optional + Whether to use sampling during text generation. Default is True. + temperature : float, optional + Sampling temperature to control randomness. Default is 0.95. + top_p : float, optional + Nucleus sampling parameter to control randomness. Default is 0.7. + max_tokens : int, optional + The maximum number of tokens to generate. Default is 4095. + + Returns + ------- + str + The generated text content from the model. + + Raises + ------ + InvalidPromptError + If both system and user prompts are None. 
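+
+        Examples
+        --------
+        An illustrative sketch of the payload this method assembles before
+        delegating to ``send_request`` (all values are placeholders):
+
+        >>> request_data = {
+        ...     "url": "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/chat/completions",
+        ...     "headers": {
+        ...         "Authorization": "Bearer YOUR_API_KEY",
+        ...         "Content-Type": "application/json",
+        ...     },
+        ...     "json": {
+        ...         "model": "internlm2.5-latest",
+        ...         "messages": [
+        ...             {"role": "system", "content": "You are a helpful assistant."},
+        ...             {"role": "user", "content": "Hello!"},
+        ...         ],
+        ...         "temperature": 0.95,
+        ...         "top_p": 0.7,
+        ...         "max_tokens": 4095,
+        ...     },
+        ... }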
+ """ + + if (system_prompt is None and user_prompt is None): + logger.error("prompt不能同时为空") + raise InvalidPromptError("prompt不能同时为空") + + request_data = { + "url": f"{self.base_url}", + "headers": { + "Authorization": f"Bearer {self.api_key}", + "Content-Type": "application/json", + }, + "json": { + "model": model, + "messages": [ + { + "role": "system", + "content": system_prompt, + }, + { + "role": "user", + "content": user_prompt, + } + ], + "temperature": temperature, + "top_p": top_p, + "max_tokens": max_tokens + }, + } + + response = self.send_request(request_data) + return response diff --git a/build/lib/edg4llm/processor/__init__.py b/build/lib/edg4llm/processor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/edg4llm/processor/postprocess.py b/build/lib/edg4llm/processor/postprocess.py new file mode 100644 index 0000000..fd18904 --- /dev/null +++ b/build/lib/edg4llm/processor/postprocess.py @@ -0,0 +1,231 @@ +import json +from typing import Dict, List, Any + +from edg4llm.utils.logger import custom_logger + +logger = custom_logger("PostProcessor") + +class PostProcessor: + """ + A class for post-processing conversation and question data. + + This class provides methods to clean and structure raw data obtained from API responses or external sources. + It handles the removal of unnecessary markdown formatting, parses the data into valid JSON format, and + structures it for further use in applications such as chatbots or AI assistants. It can also incorporate + an optional system prompt into the processed data for context. + + Methods + ------- + dialogue_postprocessing(conversation_data: Dict[str, str], system_prompt: str = None): + Processes raw conversation data by cleaning, parsing, and adding an optional system prompt. + + question_postprocessing(question_data: str = None): + Processes raw question data by cleaning and structuring it into a list of questions. + + answer_postprocessing(question: str, answer: str, system_prompt: str = None): + Processes raw answer data by cleaning, parsing, and structuring it along with the question + and an optional system prompt. + """ + + def __init__(self): + pass + + def dialogue_postprocessing(self, conversation_data: Dict[str, str], system_prompt: str = None): + """ + Post-process conversation data. + + This function processes raw conversation data by removing unnecessary formatting and parsing it + into a valid JSON format. If a system-level prompt (system_prompt) is provided, it will be added + as an "instruction" field to the first conversation entry. The processed data is returned as a + dictionary with a "conversation" key. + + Parameters + ---------- + conversation_data : str + The raw conversation data in string format, typically from an API response or an external source. + It may contain markdown-style formatting such as "```json" or "```" that needs to be removed. + + system_prompt : str, optional + An optional system-level prompt that will be added to the "instruction" field of the first + conversation entry. If not provided, an empty string will be used. Default is None. + + Returns + ------- + dict or None + Returns a dictionary containing the processed conversation data structured under the "conversation" key. + Each item in the list corresponds to a conversation entry. If an error occurs during JSON parsing, + the function logs the error and returns None. 
+ + Examples + -------- + >>> conversation_data = ''' + [ + {"input": "AAA", "output": "BBBB"}, + {"input": "CCC", "output": "DDDD"} + ] + ''' + >>> system_prompt = "You are a helpful assistant." + >>> processed_data = postprocessing(conversation_data, system_prompt) + + >>> # Output: + >>> { + "conversation": [ + {"input": "AAA", "output": "BBBB", "instruction": "You are a helpful assistant."}, + {"input": "CCC", "output": "DDDD"} + ] + } + + Notes + ----- + - The function removes any markdown formatting (like "```json" or "```") before parsing the data. + - If JSON parsing fails, an error is logged, and the function returns None. + """ + try: + # Clean and parse the JSON conversation data + conversation_data = json.loads(conversation_data.replace("```json", "").replace("```", "")) + except Exception as exception: + logger.error("Error parsing JSON: %s", str(exception)) + return None + + # Initialize the result dictionary with a "conversation" key + result = {"conversation": []} + + # Add the system prompt as an instruction to the first conversation entry if provided + for idx, data in enumerate(conversation_data): + if idx == 0: + data["instruction"] = system_prompt if system_prompt is not None else "" + result["conversation"].append(data) + + return result + + + def question_postprocessing(self, question_data: str = None): + """ + Post-process the question data. + + This function processes raw question data by removing unnecessary formatting and ensuring + it is in a valid JSON format. It converts each question into a structured dictionary with + the key "question" holding the processed content. + + Parameters + ---------- + question_data : str + The raw question data in string format, typically from an API response or external source. + The string may contain markdown-style formatting such as "```json" or "```" that should be removed. + + Returns + ------- + dict or None + Returns a dictionary with the format {"question": }. + If an error occurs during JSON parsing, it returns None. + + Examples + -------- + >>> question_data = "What is your name?" + >>> processed_data = question_postprocessing(question_data) + >>> print(processed_data) + Output: {'question': 'What is your name?'} + + Notes + ----- + - This function removes any markdown formatting (e.g., "```json" or "```") from the input string. + - If an exception occurs during JSON parsing, an error message is logged, and the function returns None. + """ + + try: + # Clean up and parse the JSON question data + question_data = json.loads(question_data.replace("```json", "").replace("```", "")) + except Exception as exception: + logger.error("Error parsing JSON: %s", str(exception)) + return None + + # Initialize the result with a "question" key + result = [] + + # Extract the question and assign it to the result + for _, data in enumerate(question_data): + result.append(data) + + return result + + def answer_postprocessing(self, question: str, answer: str, system_prompt: str = None): + """ + Post-process conversation data. + + This function processes raw conversation data by parsing it into a valid JSON format and structuring + it into a predefined format. It also adds an optional system prompt to each conversation entry + under the "instruction" key. The processed data is returned as a dictionary wrapped in a list. + + Parameters + ---------- + question : str + The input question or query from the user. + + answer : str + The raw answer data in string format, typically containing JSON content. 
+ This string may contain markdown formatting (e.g., "```json" or "```") that needs to be removed. + + system_prompt : str, optional + An optional system-level prompt to provide context or instructions. This will be added to + each conversation entry under the "instruction" key. Default is None. + + Returns + ------- + list or None + Returns a list containing a dictionary with the processed conversation data. + The dictionary has a "conversation" key, which is a list of conversation entries. + Each entry contains "input", "output", and "instruction" keys. + If an error occurs during JSON parsing, the function logs the error and returns None. + + Examples + -------- + >>> # Input: + >>> question = "What is AI?" + >>> answer = ''' + [ + { + "input": question, + "output": "BBB" + } + ] + ''' + >>> system_prompt = "You are a helpful assistant." + + >>> # Function Call: + >>> processed_data = answer_postprocessing(question, answer, system_prompt) + + >>> # Output: + >>> [ + { + "conversation": [ + { + "input": "What is AI?", + "output": "BBB", + "instruction": "You are a helpful assistant." + } + ] + } + ] + + Notes + ----- + - The function removes any markdown formatting (like "```json" or "```") before parsing the data. + - If JSON parsing fails, the function logs an error and returns None. + - The output is wrapped in a list to allow for future extensibility. + """ + + try: + # Clean up and parse the JSON conversation data + conversation_data = json.loads(answer.replace("```json","").replace("```","")) + except Exception as exception: + logger.error("Error parsing JSON: %s", str(exception)) + return None + + # Initialize the result with a conversation key + result = {"conversation": []} + conversation = {"instruction" : system_prompt, "input" : question} + # Add the system prompt to the first conversation entry if provided + for idx, data in enumerate(conversation_data): + conversation['output'] = data["answer"] + result["conversation"].append(conversation) + return result diff --git a/build/lib/edg4llm/processor/preprocess.py b/build/lib/edg4llm/processor/preprocess.py new file mode 100644 index 0000000..a8ebe1e --- /dev/null +++ b/build/lib/edg4llm/processor/preprocess.py @@ -0,0 +1,139 @@ +import re +import sys +import json + +from edg4llm.utils.logger import custom_logger +from edg4llm.utils.data_utils import is_question_template_consistent +from edg4llm.utils.data_utils import is_answer_template_consistent +from edg4llm.utils.data_utils import is_dialogue_template_consistent + +from edg4llm.utils.template import Template + +logger = custom_logger("preprocess") + +class PreProcessor: + """ + A class for pre-processing user prompts before data generation. + + This class provides methods to validate and repair user prompts in different modes such as question, + answer, and dialogue. If a user prompt does not match the expected template, the methods automatically + append the corresponding format guidelines to ensure consistency. + + Methods + ------- + question_preprocess(user_prompt: str) -> str: + Validates and repairs user prompts in question mode. + + answer_preprocess(user_prompt: str) -> str: + Validates and repairs user prompts in answer mode. + + dialogue_preprocess(user_prompt: str) -> str: + Validates and repairs user prompts in Q&A (dialogue) mode. + """ + def __init__(self): + pass + + def question_preprocess(self, language: str, user_prompt: str) -> str: + """ + Validates and processes user prompts in question mode. 
+ + Parameters + ---------- + language : str + The language of data in data generation. Must be one of 'zh', 'en'. + + user_prompt : str + The user's input prompt to be processed in question mode. + + Returns + ------- + str + The validated and, if necessary, repaired user prompt. + + Notes + ----- + - If the user prompt matches the question template, it is returned unchanged. + - If the user prompt does not match, format guidelines from `Template.question_template` + are appended to the prompt. + """ + + if is_question_template_consistent(user_prompt=user_prompt): + logger.info("User prompt matches the question template. Proceeding with data generation.") + return user_prompt + else: + logger.warning("User prompt does not match the question template. Automatically added format guidelines.") + if language == "zh": + repaired_user_prompt = user_prompt + '\n' + Template.question_zh_template + else: + repaired_user_prompt = user_prompt + '\n' + Template.question_en_template + return repaired_user_prompt + + def answer_preprocess(self, language: str, user_prompt: str) -> str: + """ + Validates and processes user prompts in answer mode. + + Parameters + ---------- + language : str + The language of data in data generation. Must be one of 'zh', 'en'. + + user_prompt : str + The user's input prompt to be processed in answer mode. + + Returns + ------- + str + The validated and, if necessary, repaired user prompt. + + Notes + ----- + - If the user prompt matches the answer template, it is returned unchanged. + - If the user prompt does not match, format guidelines from `Template.answer_template` + are appended to the prompt. + """ + + if is_answer_template_consistent(user_prompt=user_prompt): + logger.info("User prompt matches the answer template. Proceeding with data generation.") + return user_prompt + else: + logger.warning("User prompt does not match the answer template. Automatically added format guidelines.") + if language == "zh": + repaired_user_prompt = user_prompt + '\n' + Template.answer_zh_template + else: + repaired_user_prompt = user_prompt + '\n' + Template.answer_en_template + return repaired_user_prompt + + def dialogue_preprocess(self, language: str, user_prompt: str) -> str: + """ + Validates and processes user prompts in Q&A (dialogue) mode. + + Parameters + ---------- + language : str + The language of data in data generation. Must be one of 'zh', 'en'. + + user_prompt : str + The user's input prompt to be processed in Q&A mode. + + Returns + ------- + str + The validated and, if necessary, repaired user prompt. + + Notes + ----- + - If the user prompt matches the dialogue template, it is returned unchanged. + - If the user prompt does not match, format guidelines from `Template.dialogue_template` + are appended to the prompt. + """ + + if is_dialogue_template_consistent(user_prompt=user_prompt): + logger.info("User prompt matches the dialogue template. Proceeding with data generation.") + return user_prompt + else: + logger.warning("User prompt does not match the dialogue template. 
Automatically added format guidelines.") + if language == "zh": + repaired_user_prompt = user_prompt + '\n' + Template.dialogue_zh_template + else: + repaired_user_prompt = user_prompt + '\n' + Template.dialogue_en_template + return repaired_user_prompt diff --git a/build/lib/edg4llm/utils/__init__.py b/build/lib/edg4llm/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/build/lib/edg4llm/utils/config.py b/build/lib/edg4llm/utils/config.py new file mode 100644 index 0000000..a4534eb --- /dev/null +++ b/build/lib/edg4llm/utils/config.py @@ -0,0 +1,8 @@ +import dataclasses + +@dataclasses +class DefaultConfig: + """ + A placeholder class for default configuration settings. + """ + pass diff --git a/build/lib/edg4llm/utils/data_utils.py b/build/lib/edg4llm/utils/data_utils.py new file mode 100644 index 0000000..d928539 --- /dev/null +++ b/build/lib/edg4llm/utils/data_utils.py @@ -0,0 +1,157 @@ +import json +import re +from typing import Dict, List, Any + +def is_question_template_consistent(user_prompt: str) -> bool: + """ + Check if the user prompt contains a consistent question JSON template. + + Parameters + ---------- + user_prompt : str + The user-provided prompt to be validated. + + Returns + ------- + bool + True if the user prompt contains a valid and consistent question JSON template, + False otherwise. + + Notes + ----- + - The function uses a regular expression to extract the JSON template and compares it + with the target template. + - The target template is: + [ + { + "question": "AAA" + } + ] + - Returns False if the JSON extraction or comparison fails. + """ + target_template = [ + { + "question": "AAA" + } + ] + + # Regular expression to extract JSON template + pattern = r"\[\s*{\s*\"question\"\s*:\s*\"AAA\"\s*}\s*\]" + match = re.search(pattern, user_prompt) + + if match: + try: + extracted_template = json.loads(match.group(0)) + except json.JSONDecodeError: + return False + return extracted_template == target_template + return False + +def is_answer_template_consistent(user_prompt: str) -> bool: + """ + Check if the user prompt contains a consistent answer JSON template. + + Parameters + ---------- + user_prompt : str + The user-provided prompt to be validated. + + Returns + ------- + bool + True if the user prompt contains a valid and consistent answer JSON template, + False otherwise. + + Notes + ----- + - The function uses a regular expression to extract the JSON template and compares it + with the target template. + - The target template is: + [ + { + "answer": "AAA" + } + ] + - Returns False if the JSON extraction or comparison fails. + """ + target_template = [ + { + "answer": "AAA" + } + ] + + # Regular expression to extract JSON template + pattern = r"\[\s*{\s*\"answer\"\s*:\s*\"AAA\"\s*}\s*\]" + match = re.search(pattern, user_prompt) + + if match: + try: + extracted_template = json.loads(match.group(0)) + except json.JSONDecodeError: + return False + return extracted_template == target_template + return False + +def is_dialogue_template_consistent(user_prompt: str) -> bool: + """ + Check if the user prompt contains a consistent dialogue JSON template. + + Parameters + ---------- + user_prompt : str + The user-provided prompt to be validated. + + Returns + ------- + bool + True if the user prompt contains a valid and consistent dialogue JSON template, + False otherwise. + + Notes + ----- + - The function uses a regular expression to check for the dialogue JSON structure. 
+ - The expected template format is: + [ + { + "input": "AAA", + "output": "BBB" + } + ] + """ + + pattern = r"\[\s*\{\{\s*\"input\"\s*:\s*\"AAA\"\s*,\s*\"output\"\s*:\s*\"BBB\"\s*\}\}\s*\]" + match = re.search(pattern, user_prompt) + return match is not None + +def save_data_to_json(data: List[Dict], output_path: str): + """ + Save a list of dictionaries to a JSON file. + + Parameters + ---------- + data : list of dict + A list of dictionaries to be saved to a JSON file. Each dictionary should contain + the data to be written. + + output_path : str + The path (including the filename) where the JSON data will be saved. + The file will be written in UTF-8 encoding. + + Returns + ------- + None + This function does not return any value. It saves the data to the specified file. + + Examples + -------- + >>> data = [{"name": "John", "age": 30}, {"name": "Jane", "age": 25}] + >>> save_data_to_json(data, "output.json") + + Notes + ----- + - The function uses `json.dump` to write the data to the file. + - Non-ASCII characters are preserved with the `ensure_ascii=False` argument. + - The file will be saved with an indentation of 4 spaces to make it human-readable. + """ + with open(output_path, 'w', encoding='utf-8') as f: + json.dump(data, f, ensure_ascii=False, indent=4) diff --git a/build/lib/edg4llm/utils/exceptions.py b/build/lib/edg4llm/utils/exceptions.py new file mode 100644 index 0000000..515dd57 --- /dev/null +++ b/build/lib/edg4llm/utils/exceptions.py @@ -0,0 +1,35 @@ +from typing import Optional + + +class HttpClientError(Exception): + """ + Exception raised for errors encountered in the HTTP client. + + Parameters + ---------- + message : str + A detailed error message describing the issue. + status_code : Optional[int], optional + The HTTP status code associated with the error, by default None. + + Attributes + ---------- + status_code : Optional[int] + The HTTP status code associated with the error. + """ + + def __init__(self, message: str, status_code: Optional[int] = None): + super().__init__(message) + self.status_code = status_code + + +class InvalidPromptError(Exception): + """ + Custom exception raised when an invalid or empty prompt is encountered. + + Notes + ----- + This exception is intended to handle cases where a required prompt input + is missing or invalid. + """ + pass diff --git a/build/lib/edg4llm/utils/logger.py b/build/lib/edg4llm/utils/logger.py new file mode 100644 index 0000000..de430c1 --- /dev/null +++ b/build/lib/edg4llm/utils/logger.py @@ -0,0 +1,104 @@ +import datetime +import logging + +__all__ = ['custom_logger'] + +# Define log level colors for terminal output +LOG_COLORS = { + 'DEBUG': '\033[96m', # Cyan + 'INFO': '\033[92m', # Green + 'WARNING': '\033[93m', # Yellow + 'ERROR': '\033[91m', # Red + 'CRITICAL': '\033[1;91m', # Bold Red + 'RESET': '\033[0m', # Reset color +} + +def custom_logger(name: str): + """ + Creates a custom logger with color-coded log levels and UTC+8 time formatting. + + Parameters + ---------- + name : str + The name of the logger, typically the name of the module or application. + + Returns + ------- + logging.Logger + A customized logger instance with color-coded levels and UTC+8 timezone support. + + Notes + ----- + - Log levels are color-coded for easier readability in terminal output. + - Log messages use UTC+8 timezone formatting. + - The logger prevents propagation to root loggers and clears existing handlers. + - The logger uses a custom `StreamHandler` with color support. 
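+
+    Examples
+    --------
+    A minimal usage sketch:
+
+    >>> logger = custom_logger("data_pipeline")
+    >>> logger.info("Generation started.")
+    >>> logger.warning("Retrying a failed sample.")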
+ """ + # Create a logger instance + logger = logging.getLogger(name) + logger.setLevel(logging.INFO) # Default log level + logger.propagate = False # Disable propagation to root loggers + logger.handlers = [] # Clear any existing handlers + + # Define a custom log message format + formatter = logging.Formatter( + '[%(asctime)s]-[%(name)s:%(levelname)s]:%(message)s' + ) + + # Custom time converter to use UTC+8 + def _utc8_aera(timestamp): + """ + Convert a timestamp to a UTC+8 time tuple. + + Parameters + ---------- + timestamp : float + The timestamp to convert. + + Returns + ------- + time.struct_time + A time tuple in UTC+8 timezone. + """ + now = datetime.datetime.fromtimestamp(timestamp, tz=datetime.timezone.utc) + datetime.timedelta(hours=8) + return now.timetuple() + + # Set the custom time converter in the formatter + formatter.converter = _utc8_aera + + # Define a custom StreamHandler with color-coded log levels + class ColorStreamHandler(logging.StreamHandler): + """ + A custom logging stream handler that adds color coding to log messages. + + Methods + ------- + emit(record): + Formats and outputs a log record with color coding based on log level. + """ + def emit(self, record): + """ + Format and emit a log record with color coding. + + Parameters + ---------- + record : logging.LogRecord + The log record to process and output. + """ + try: + msg = self.format(record) # Format the log record + color = LOG_COLORS.get(record.levelname, LOG_COLORS['RESET']) # Get the color for the log level + # Write the log message with color + self.stream.write(f"{color}{msg}{LOG_COLORS['RESET']}\n") + self.flush() # Flush the stream + except Exception: + self.handleError(record) # Handle any errors during logging + + # Create and configure the custom handler + custom_handler = ColorStreamHandler() + custom_handler.setFormatter(formatter) + + # Add the custom handler to the logger + logger.addHandler(custom_handler) + + return logger diff --git a/build/lib/edg4llm/utils/template.py b/build/lib/edg4llm/utils/template.py new file mode 100644 index 0000000..3115009 --- /dev/null +++ b/build/lib/edg4llm/utils/template.py @@ -0,0 +1,113 @@ +from dataclasses import dataclass + +@dataclass +class Template: + """ + A class to define language-specific templates for user prompts, providing a strict JSON format + to preprocess user input. If the user's prompt does not include format instructions, the + appropriate template will be added to enforce the required structure. + + Attributes: + ---------- + question_zh_template : str + A JSON format template for Chinese question prompts. Ensures that generated questions + are returned in a JSON format with a "question" field. + + answer_zh_template : str + A JSON format template for Chinese answer prompts. Ensures that generated answers + are returned in a JSON format with an "answer" field. + + dialogue_zh_template : str + A JSON format template for Chinese dialogue prompts. Ensures that the interaction is + returned in a JSON format with "input" representing the question and "output" representing + the response. + + question_en_template : str + A JSON format template for English question prompts. Ensures that generated questions + are returned in a JSON format with a "question" field. + + answer_en_template : str + A JSON format template for English answer prompts. Ensures that generated answers + are returned in a JSON format with an "answer" field. + + dialogue_en_template : str + A JSON format template for English dialogue prompts. 
Ensures that the interaction is + returned in a JSON format with "input" representing the question and "output" representing + the response. + + Notes: + ----- + This class is designed for preprocessing user prompts. If a user's input does not include + specific format instructions, the appropriate template (based on language) is appended to + the user prompt to ensure compliance with the required JSON format. + """ + + question_zh_template = \ + """ + 严格遵循规则: 请以如下格式返回生成的数据, 只返回JSON格式,json模板: + [ + { + "question":"AAA" + } + ] + 其中question字段表示生成的问题 + """ + + answer_zh_template = \ + """ + 严格遵循规则: 请以如下格式返回生成的数据, 只返回JSON格式,json模板: + [ + { + "answer":"AAA" + } + ] + 其中answer字段表示生成的答案 + """ + + dialogue_zh_template = \ + """ + 严格遵循规则: 请以如下格式返回生成的数据, 只返回JSON格式,json模板: + [ + {{ + "input":"AAA","output":"BBB" + }} + ] + 其中input字段表示问题, output字段回答 + """ + + question_en_template = \ + """ + Strictly follow the rules: Please return the generated data in the following format, + only in JSON format. JSON template: + [ + { + "question":"AAA" + } + ] + The "question" field represents the generated question. + """ + + answer_en_template = \ + """ + Strictly follow the rules: Please return the generated data in the following format, + only in JSON format. JSON template: + [ + { + "answer":"AAA" + } + ] + The "answer" field represents the generated answer. + """ + + dialogue_en_template = \ + """ + Strictly follow the rules: Please return the generated data in the following format, + only in JSON format. JSON template: + [ + {{ + "input":"AAA","output":"BBB" + }} + ] + The "input" field represents the question, and the "output" field + represents the answer. + """ diff --git a/dist/edg4llm-1.0.14-py3-none-any.whl b/dist/edg4llm-1.0.14-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..88568a176eae270ce13ca665ad5a12f5787abf73 GIT binary patch literal 42481 zcma%ibBw6ZvhCP2du-dbZQHiF$F^=;Ob#880G_6Po^sEEr*u#Jt4ElEw&NiI!H zQc2KAN=eJAj84*w(orhdyEw4j-Q67l0sMQ52|r5B-E*14aBu(sb~*q6-2dFd*udF9 z%*58j(ZJcx(dqATTxn{_ZL%Z#oT#Zk^y8bDmMtz>ie)6rpQ(yB8fvwF_{k3w=U}7~ zPY=V()!+55F9vOvJa+Sk&=1V(=7)hs{zP z-9D7IZdn!x_2S8^aBdlRPGmu@Df!{>3qNj6kVZXDEvX z9_{xD0|xx@1tJW{_azypj#z})glafAotjjT6-FY3(4^FwQ2+$E2eb_l1As0?j!GyEfu#*+Yqt`tG)>iOp91@j|9Xl# zT>++_2CY7@dKt4>j$6Z!a0*<&Dz-_h`l6e`vnsu^Q$PrC_G=&nth4Gw4XMeIYhp}b zAOk0~Q%p#PeUKXI=cFbYe~r`z8?L zq5|4#=^+cyg5!CyH*$SA$&4Nz@2EK0*>1(#bpasj4iZMUo18~^Vw`Gc!z|`>ATs#0 zy(cINMZqttb>-Ve;}UG}R|h+chPrAf4>q}zUwJE;R0Dw2$kO<_sr80&%!0}Fe)Jg} zHVvo&LKN&R@%p5jf)7WbEQoPRoPrlJURyR3-GLIFr;px(a|YuIl~NRXBb$MU`X5z!=fGJ z-9`cctz?gVL5aUB98jH$3CGTpq-SZDOg9y)j5xx8AP(EjgX{2mWK-SS^MU!NynTsjeh;|n#%)~>EJOreQsiEDan-{c&_#EkuypQaoNtaR#@3o&d0eIB-$+W~Im9Se!BY9Bv_TjN1>saG#ok^3=)p>)o$!s8 zF4!ydfiZFO*C}D8z5pc5_yAmmhz9f+q7yYS*XA2PA?^#mVc6zx?wxsFeEe|@Wv(fk zUG%PCr}E3SUcaKv@0BB;pl69XK2iU9RHGC}K&I48yosrutI|R*(z16+`;Xz54@P!~ z#rO#RL*vC)`%ct~Y#jXK7VS80TR{`1bD~9Qbd$qk3O#xm6EWDkZc0fuJPZ7favXl~ zyLcJK(EL9S`?&mys1?=1^%avb&{#&9)=cvp*geLBQMPs0KyxId);$x8mcLqVaQNQR`PS*BbU-X1(9nUs; zz0}As%CtJHD=Np?qE)-rAnU+yTW$q`e8WLILb#Lh{r7ChLGa}sIK@P=^#GYQ5Erd$ zZp`UfE1maS>N>eE*Qn*ojXb27;ewlrPFn8VLiDh~OtIV?7POixLZd4=Xj!4O&9HVW z2O)9s?Xi`@S4KbBFht9}GX?NDBlDuyPGB5Gn!;)WoAk=INfsIwx+X&kbylo>6RyYz zMeim%Evt`L?2$`@EV7-+C9Nd1Hf3XY)b%A@Uo|>>jn-<~B>%XJcFUI}Ad(RUL-U)Oz4`sZ@g{;VYfU*vg!fK}3XqkX<0nGKOXR~Y96e0b2h^B-?a z2*cdiARt&B5OCOy@-H)Pm+WJYSIC&KB?z z>c?8$&be-OH-1GMGi(27nBNlHZEL6C#+muDrFEc5ceAJL2rx%y+yI@QP7ft+!dkVa z3%3#QwCA;Ixzb`>pGDhnSpWG)5`l05JYDiCfq@n>d;p7@7Pl ze`|SK5se}I=;h|8lZ=A7Ph&l^6V8G)YxSO<(G+(#gB+bOh 
zrIh=~yl@kr07~!=1Qey8pL?i1*)=L4Pi^b^{+u5l4MX6d9^{q8Utv=*Gt zf)W6UqZc6mB;xAc(mA~JKMDfJvYpaBN)g}eKzWT%$V%BrvBjV$he2KHH07YvkT$&` zL9o$Li$*4HN}w=^&n?`~8TFnT3|NFUr`!w76UiphBk)hM;ovPY5iM6;t{Mym73mMy zCc;Psc|__(LCzr{m@=nH{;ntd)wriZ#jh%=NSPL9)_}K~(`lsK42v{PK2DEzh-%`I z+Cb(r;zau*b)Tq`hlG|S>1tl-K5U_Dqs6Kh_ok;sD&NwU;wme3H=5xNqrKB8s0#X4 znG7;=?EVB{{VZ7DzlyoymlXYG2@}9+t(D`?xZ7xRV&j&>ND%iSw`{!YwWiEr4Zy|I zODIr|S%#9S4B6*vQ0U)Po*#5%xe`?XDxjfg6O|t9a2*ww!P=+~4<2y%nG^4#N-{2C z-Gy%z*CQI=n_^YiR04W&vI@2I-dvpkG2WtA~co+A>6 zWq$|sB|yor;TJtiL<#q}RpHo!-$Enag1^F@%p@~de^tRvw~yK{Jh)p=V|9s0Ba&|y za%izsyE@!Z)Ki?`!}?;%G+>2N&k?D8E_{ODFwK~`s;?p_kM((B){)=82r=)*Jm+Fd z6u5zdMjCB(@mrIZ$yLd>;7oYf8$Z1)Mhiv>MKRx$V&~HT*D=Nd!XoG-P>}co>p&H= zlyt7qST!q_saIuFS3>=sYgN$VP>t^WzS>EJ$tkH}HC1C|Tq8(>Uul|>Rjz#%Y#Toc z)1v*~6XASg%xkbPa}?4euc&90;riK)n9ULO|0R80P93M~EsLn@m`-dK!Ol=^onnp! zQA56^&^8rm{-=$E1ikI&c7JII>~IHg4hOlT(8f{fzK*i*zVUdBXXAryCRH~61s?#J znH)iI%+522DSct`h4fnZ`|}`L_NQXz2J%uriN#=Ky4HW(kbIN~X9}peIiK^8^QGo??=Yu}3>3;l&_#kh;1U?(z#E=j!qT6nQH&jH^?_a-gC%@3>h0mkX{2FiyWB?TK*Id) zPM@!gPJBB|bojLD0-Y{;$i34j63WLW_hqS_CG+iSIO0pnbqS5gBeos>`eEq_$ZQCX zv_~D0k8&Ht2Usj=veszw(y`@w5Np*yr*xtQHh$|8!Ar*_krJtOl|mi_n#~4$GqpLr z@|0OH*R8tF?MC_;{$$~au~n_3q6f-%3~Rr-z`bXP<81-FWEIgD-$zwvi&hVJ_cB}0 z_hne&n{<^E?X4B1kp!f4s-)ydTL27W{hVvr-3A z*fy6qDmS*?&rY|S1J~n0qJMcauYzx_)UkH+jITKx>0D^->PY6CGt?TCuSWoPl=^S@ z^yS4SYRVPE%k_X|iu_S4FqxSGydylvMv5;wDUOR0UJmM=@wrS~rG)UB6cfAClJj80 zsSY^1rIx?itePwEnw*evL4AXUaD`0V;{Z(rlGN?KVKm9N+t7?Y8eG!a^oi?+M!T~$ zd^cgkBjBRfh?bX&(Ijd_YFL3ddi`V}y*=c0Du9&@u^VHDn zKpxcMS^FH&xsyhOO@)vFZzS)}%kM@af`h5NhHmf@b1aBQ4w|GlngUb_sF2X#(x`|Y z%t2+1HC*4-;av?!1ULZje6K*q0{md3!9pEbav2`+7QRcNKfV-gm=bY?c6nCdbYs5a@9YxpkjPVIsY?SAd$5DtXs=Qpxj^q!3 z$=e8kJBbv_HE8&7UZ*85f2-V!$i^bZGGwAu1m&dQS9p^n!DP5`I~4}UZrB@ZlhLpY zP0BW0-fnnVnj^h&AA+V0*f8s2U#*Y%1jtufS#HUGi)tgAoZP4Ax`?gZ6fLwHV56Ti zY7c@nSoyTpz>U0m#5F^$6G&WsgHzhTKOWg!8}E;zMmKnF(vcJ|+EGncA8%Neam$Z4 zpz9x&*8l}8BRMUdRa=7WC>PA0j~>fCGY3LZWtEccY3Y9TS*n}OOS*e0C|7fK{&GLR z!I}ma!%8b;TI^v|KNiiIx1j(sFf@0CjBqME28IM*kS>UL=E@u=MILs<9(FEA5>=yr zj6Dqwl^5Coq|CBWxN_b(sf-3`kRCHs#YIBV!xCjG=|BQ|+yqTH#V@oKRm??}0!ydO zIK0h1J1jDg2V(f$%eaoS7qwM(`yfT2@Bg!`^DalKRP*vx%8EUyu@z@BnYC8|o_nNn z3-PsLA8gEl=YHdwHq`w!;Nqw5?&+Gzht^i#_@i{HQqrpcGLgWGG}M0qpAfv#6Jome zG()867M-W*_GHZvV!QW*C)A2P0w?i1{;0dGahbnnoGa_N)N_Fm4F^AKSHGJyv)P%3 zVQJ31pTkcy#&9!*-H%}LvWL+;$f=p9!EI!3TZ4^#+JkI)SK1OT`@4>jo#Qzsv}<7p z@UYj{De}~1+8B?swzZXG;zw3EhY`o>MPP;3BoIe7EC}hwDI`F)dj}%{Zs!HYoK*J>=5$yO5Ox$!5K`zKDZyS4(My($xKEpmcrS=J zD=r8rVfM^P5K&gV|A#DL7xtEndM}71t2XF|G$l^}gySg@%+o78<(e2tlh^SN05R^Z zehu0Tr*jZ>;3;4;54=zVHV1l;cBeQHF5vwfBrEGisP(2FoHX}f;ewqZ^NmJXCc~U8 z?`D2mluQ8+Dtp47A?(a!4I21yU7SKH^(zv}ucNxCOKStL6n$|>4fjCcTPS%#s)g4^ z_E4qJ$nDzgn?FLN0-oTceXe5XU$|hFT;I=1MbfF>0{3geOMEC|8cA#byH2?kwY>4c~zOCbcgA;$dQdZxb320017?{|9eoZ((m@ZDIQ_ zI9#MEW4FN$-}Rve146wI&@9V?F$yH0gbb+2Y`)M%jS55*ZOn>XmY}S3Q?o-rp@^qi zZ$2S}rF59d#&i=sxFKr+J#KATrCD1NGXdtd+Lcm*!~K}ui?ewDWL4 z$^=&25ehpyUO)NsV?Vg21c9bWsu2iA%_C~jCVJXP8IN0rnbA9t+!wzk5FEmWNL35P zO??rLoK2n%Pe#td7%7kRz5UF%9jIwh>ZVgrjIN*V?zmJ`TTB<*3*&V$r(SIg5N(jW z37B}CexGZn5;RM5IZTKecJ3P%kni%^{zvy@lc!##ITkZ*`oXSg@NzAkJP#*Z*_ zNgw!Vn10YbIaW2}eF7HDv}aCCD>bLaj(O(G{b(7428#+V83(j%#03-K0bWJ{$s2>$ zk`k?Xu8+e0xMcrl#a(=ZuTYdWn>|l=s2*k)iF#4Sj8_`KZ}DQMZNgdVe-ubSAj9 zJ=IOSJvV)Vs+)VwS7nsx?A5S%H?S5Lwjaec++fvN0I>e=tQpuk zxtcis+y1|}0gbKLEm0KTogTy1r3lY49oH5~74yR+9O5zpVnXH=! 
zt`l~zlyGeXg4EWw#E0`|8jATMb~L8^V+=X6!Rp+DKbQ&8&fUh__2!PO5~c!w+z11O zrAevjUP6ME4Ov+uzn>wcG-Q)UP4ZTAFu7(nlaUD;>UGT z)UZ4Qi;C#~C?*U?c8l57b_?&GKY6^n2BtAF)V2iVftHdIKtiD-#3u*_K=9G1Pp1cg zcZH5d1#=(Q8j%vFfJkK=Hc_OeP?k>2izBj_7*8M>O?A6x1rhPS(?) z>?=*Uh=D=}kBow^F(OwEfJ8x*w8hARKbZ!Wn?0OpBN> zpvoXcz!{2SqEK4tw`y`GEEAfcj--D4o8e3R0vPfi?I|bk`Sij z)joDi6_*Cu?sww2Mbs+jF$`~1;QOmJU{}ZrAj7RA8Uj~3)2KJ0y!}3pTD~%)F2-?< zn)F0nIugVZ(hv#II0O+i%0O!t8Gh>^a)ls?7DYv0qwHOM@ff(~IuHKOsLWgVIkI)# z(x2b$JoIxmM{qX7nF-}hY>iYz`hYIXH!=xp<7>(2mM@Jfi1>&`IMOh|tEhw=0ui~= z1fTx#CR++y;>w3j&5^|IDQJ}>VM^=@N$d#9+1LamACzZX!h*jR$AVO#)ubsw4-u1` z>Zf*N*G=Su)B;tq)vWr;$<8cqv(+{dBCNA`Vxv+c=?#J}6BZ-VlMt{_8wdfVuFS1B zi#N1il=AupUssu-0eY#KpRFA{eX@0}LJ798tl$uz0t_7`sr!9+sZ5D!OgHjiHPv#T z6Z}dd6Yef8LpA(UV24hS^G&ZC(@9)ocQmIe+u#1!Vrrsm@N`Vc0^@F}VWPJM6Mz^weS)?Jy)9_CXfL$mSeWeS*T*|xoiRUAzEcSi3=aws%=SH1KgKiv2 z7`7KxBLs}N;1Dwe&q2yq`Tz3xy%3N*<+5Abs`#u*ZZX&lZ9b$Jwv$dlvRZYfLC#tA z)<1Efs7sjX2?ZV?>N385G)=`yAGD?ku9SVwMmYzc22&m=i(roHbP?!VDmZ%_kEt1o zd4Em>x6JaZjh=n+^#EIF*8*qOJV;Bg!gcv=bMxBxUH-(tdsL$Ln1YQh)Y!)F+WvlG z>YWAH)y1e#i|lrhy1xS$HoD0oAMOh zpd0KN9?B&{s3vs*MwbXzF&ZZ_s4b?4t+R!?PeQbm)rr_R&F;Lf6gdw%OB2Rv{Ey!c zI#%-(H=28oD*=nd*H;B!YZPb~1Oo_Q<8$8C?3e|GP{$tYG|z~Z+meMQ=je%URem>i z{+9;|a2FOPvOR)LQ8du{%8dic&N8W?CqbaLH?SA@>`Mswy!gez$A*_TWZE3E0(`bi zZTeuGc8W3DK1dZdQ7{1ESR^eU^<-liQ4>XI3~A#o2bjOPbR@y3Y{JN8L>i>d0l*i7 zBy=0~%ATE4K0$lw4A>0KTJiTByjKJ}pQOf_W0&Dt?r@Cp zz+%nb#q-_t+NuWzm%qVK*%@)o2*@SFZH3J4qFR#sWI-1vC_Fg2Ju4oGrE3*i z0aI?Q(5N}}aRvE9-&HIeZl~{UpIvUZ=gF&`Yb>6Rm)nce4Sb3i_h{32&HkT*&>!CS z^S!+;E%01!`V%<5A*UJK9p28LngOm~x!WBspC_&{d_Qk{TfW?$pE-CC9ZHji)IXRN zJI2&R&sZoB!$=!a$+Y#QWLBvybsKXRlVO!KO}~}RMw-2`_jcjIk0b343PiastUs4^ z=0`m($v|~weVHAv1G7vZgY%a*xLv<5hayw4i7&+II?J0al($^$oHdK$Kh~N&nVKbt zzU*E?+8HpHTCS=Bo7?I{EdUlk%Pl^CU-+j^h3RA*49;EwcY3JFShZnS4GiYflwdic zrmL0)oC1JjBB=p*&=FmO>Hw(erYRi4XlQj7l#RMn97Yd`WzA}Lj!D#A%LGm@O^eWX zWy00OYM*bVj7-$H#e~z%@hBGi#wV2P$eSgS3+wrXZSdpP%f4V`n0tm<8^00e6 zrb`{3y7sWYPvODY{gmS*(IJ>OxT;s9BEt__!1hYtZ$=r(i%R5zChCi`CPalS(l$Rq z!&*Q@i^wxIYYh{gAi*JV6ECJ&mj5=}0Z)cP390J6kZnUU-Yur-?Jnd3a~`N}qO0n= z`k{h>oYHR(tJP~k4RAmm#%7ws4BfXR6kdkaGpU1xDf%`P=l~XbFz+%5*Up=a|j-ePhuDeoehzbRlg%LDXYyEy_o z)H1elX{z|ehO^%GTd6o)FdI@gJT=wETA4&mKHDjSljt;Y+l_N%GP4lQzF5NnA7CML6K7&tnrG8l25v1DrhDy7};)60W5 zimN6CY8H}=s>#miwu;tO;RZQw79?N)wPPKHq)5pz!^g6$jC_xB7T0d3dx0OF zsAVGhplq(w>D3EWL$3Y|u8^L#r!bP~27uDW`V^Vq)6|%6P`30B@dGNs6f^GYcfUNS zE(n1?P2`QIGcA-FT7`Z6izW;rwf#4tTf;}$cA>G=IhaK6Tx{e!t~xbrFN|~ow08)X zaQoZ04O=b4Ygj;e#=E`e6u{G|!RNzT&6*R|=>^<`!9;f0E+LGh!cmt;4v61{>9m)- zsJC9sxk;kx8Kv@Q)A~NUzN~?vyAWD|i85Vvaji@jN(h;5*`ZrYoBUatm+%9ha43wf z>iKbiBFD*8gc9OvWfG)WP~XZ<*qOyw7OUxuM zKkGS1$TOp5jQ1E+aVvb6(1y$PZq6owz0oI&#nqI!Ys901+k7S&QYtA{EeuaFqs4D0vaugn z4$d{?O=wd~F_Pu$XnvQ)qDGQ~q4vy(0exnD0CH6>(6{(ff(tQ?f}K6)#4o<%f1;yz z;D|Y(YR20O9L?W`@G@8)y97rR*~hOk&AmfN#^t;vYd(Xk;4BW0@Nk@GHFV1n4ks2R zo%{n*_f*Prb#E^>&>em)jl zwGNOZ`K7;wB zhqT787FyV)uEucrr#j=|Pcd%tl+v(Sf6#UTW^Vr6lf!Y@i?gvyIPc{TJ1^qSP6UUYcD4qsIEtlSU!-)X}AO z!HrBU+bhf{Hx`!8%D1=15~Gl)ji6y-?&HV@{fPs1$wNRY2OaOt8XGRizJjxqRJL@M zwi*%yV$VXiy0bpsri(&q92Lk35Ai7mBT8MS8$kDtuT*f${63SUUOh>)e!;*-qNOf4 zqohQXFiH;@_C-VxQ5GRql)0ZJ(y0$al?xLH4aqJpy5}g3Wt>PZ$L?J9z z??aj~XoRN~?eR}^G#$=M0{Cj6O68|SO>$$I2AMMS$=~bI3)Sw5Q8OikAtsw1pDyh) zm@^9upsg%tofSNJl=Mjrl*X#SMQcDK2@;9pf)k8XUA{Syio{I3IW5IjOgJfmp}XP_zsNt_bW3|{{fLPi4kM^1 z+RkZd8d(B2Zf4v~iXh`M9tWaRC+bejX~`Lx2sI3WGlsm&y^_m>%0OWO*bk^h zXq3LLiyZ?^1OEuH2SjfT4ASfREv=phETvHFaD;hxcz$a$5Ul2*tbhkki)@+B7Gy|+ z$0I-USI-(Jt>w$-$S#Qr1HsQ6;xuYcDyrhHNds28cv7Olo`@Z07(py9(;7s_(mH0r zuKn_0(Swk-UX4`I5KN98Hh{GQz65(wiKD(=k`qq39ZI|Gt|k-n1=YS_W#Q{YK_sH7 
zVoqtE=r=8|=Lcd7st2e<-XLMCD@(quge)2kaxnxGB*a-T#KATTSOONxFZ(zI6)gmGM)oALteAEXQv23M(& z)o7NNTIkx~@%ie4UIl}dL@zi-Sx4cEl~r#FN6s^;OZRKqN6&>@^LPm>CMk|WyYEG6 zl?ovhRzgZAFb5p~zTlDJu`~)WSe02rU`8AOqoha@EDI$nuW6V|5)-ek4@xN`^kdIJ zK=-8Q5x_vlXpZhT_WqR^=hYHoV3JtNqg#h&m?(POP$l5DAwp`|{Hu2MqK0N`K%{-W zt249LS_6(N!-!$2*09u@R@lS;-8EHS)6co0!R!d%LBl-l^=hKwL{*_a24lK7AcVLq z@7%CZdHlk3&sHieu{f>PCDRdVOsmy{UoDr(Wl~>n75c^5<|@=PC)sl^R`*Ji;@;w$ z`+1AJAW7iQi~8F2qe2^{QrLX_b*Yo38egrC!F>53!75~SrU+>Yj?K)?f|btUTw>WW z{v4fo#*81{?1M@hp+;LqARD~Kz!j^s+}TlA4nI&!@Rn1#&Eis#>01lswq4^g+YLRx z`m#m*lM~d!EcuzU*h}JdB*crVS>T-mteIS#Ud8Rv%^Dfsn5WY#wd^!CQ-{9?_*)1=VLjzT1=vW5y{rCMOLoXy zBYWpTHySunHcTC%+VDS*q`{SJYlmHJ1huCR$RLFm_Al8Kv#E8uCpXBwU6Q@+-XNn8|}TY`-03l!gK$de?cxPm}k!S12RJ~ zhq*;(-hr){n?OhvIUk)6qyvHXbjggtR~kCYAo~@dxOy5&f*hZW^@O47 z&F`|;&nr~pHR|`i+V6b_^pF7`v%CI`weO~jVbA5Umlh_D*A3ajMFqaO))$)_DTuDf zw-vJL$M4ny6Qi0hj5a`@$eg(HVd$(9_7*$bdu}$^@C}(H0eu&M51Sz?xX;jb39ql% zh1hTJ_p6flE4XVUW`ilW5xKJWg+I~N62`5a2 zkNOJ(V_{(CX4k|rl8lG|I(F&;d3mMsqnOW-xxmJ6%no@PVp61t-Q&gFXQ`#>qxw@Y zEclEQm}al)^4eZU&<6GMt(`-JonpbmFPw^Ph3LXLk>Y0}QL8gAgEBAwtpm-Q=5PYf zx~hX8oxC>MQ=qE);4WF}@*UtQj~m?i44lz}atku!!qhyq&n|0eUBVAfPu$oR8=Wg! zbxnNfq}$N3=@?MquI{~>Jqm32eTG+EgiLY}nx$ypU|t%|iL-yU%)$MIF63yI&1!%9 z-ogL&)&DP{i-U`ale2}L?f9URAnTWjj7-*3-N8Ff++zlH5PH< z9Z8?rF_!NNbMAZ|g_nTyQj|pGl=Br;Xw4%RB}-8rcdXm9UZK5rFg&?qvQ0Yn6`}pF zCOXPR4@lAD$V1DaUFbA%1U5KTjc|%1)2mO->-k7NJFs z1r-Gg#7U5tcDrln@ESl$l_8crB5x>lKX4Z(B+mrb#O_`t@0(O4gJY^1Ij`rMNmJ4y zb~J9;GzFwdN?7u_Q1W&JqWV{C*{wNzhNlg#{TSBBl#DIFUfg z2qK0N?pMpeNQhd;f~Fh#j5pRG%=wu8 zWyizz0+Iyi2^m>tl$*|<;zYRpPdH>K)N7Ry(?OL4_w+b3N7CYz(=LOU))sP5GN%}R zs7-iu&Z4gGG{E$cHmKv>dsvet-_Y+9-W|4dNx7tRV8YOM4E*P7q!S<7rTY+I(T_2z zHwU>_S#+#P61wR)w-oR$XmH}-$!AF}QpI7Y@d@YYz*zmg$P896?E`dXT$?b6Rz!-l zok}7xQPQI#qWxtBfJxQ>)q_B;kKv(nEXH>sGoUx-2RfYfnAiy+q0^F>w_Y4a)P{r& z1(9l5+0Brjt07RiL4-+aXC+0WC9T_<>viTf{<)JF*>YSORyql4Y= z+`LT|X=tnG}!MHrY8@#{)g{LE z=<9lsAnjR#X$vb!;nu5!R4R~{$=K?|+zE)H?U3>Orp|&2DM>|gq8$|x=5R6`<2g79 zk7-Th;8Hu04M(;7CyJGDn>7?xO(9gy+z`}_N>58c6UD($QZEW=UP#Qd>>@0LwB>ts zuY-q^mq;OvYH=HIV71kA&MKV;7<>J$orKI|wM`MG3|G;tF#%12iGVKuK-h86T;BSM zOI~yg;mg^#Y+LYRvf0fTQ-POJ_Z7PWm4vAL*+_J2_VM8f7;s*%289c*5g4L>RS$bj zJJpi~Z1F8m7dPSZJ5f#M*Y0>gy zBV`h43^p}Zo8;WWq|0PnJqz%Zk+w&jwuYm*d)q*DPO*T+;jZu6c#&wVwe!}OgDJPA zALMT6O?@Lmz;bPtT*%?F%?IbGA|DtHMnX4Ifq^dNstz0@3C@GE6zSP7;aIZx)5aEL z85MzqILdQDtl#KlCNpb#FozHJzfjO0-TFDPx+H6+1%=83{cyX!_yH7Z;VXANAF?!WiP zaw9{*Yw&h>-k&lHPIhQPiSYG+vq&r;IIl561js01rdpKL-t{@b7 z7r^SO?Wc#?Ufn*WGfZ{UINg2F{D8UDP+}TkhLaU)Y?V24BI?pg9m74VXszaI62-7; zXGP+6_p#U21E=?8|GK-kKNdN3VD@A=y}Q{upjeblQ`9jOnvyf@m<=O8W2(GV32d~% zZ@~96hDMQVj~)Ur^o>K2AnVPR>9Dzrl@SL4Y9!vDp{0!ra_d@h{in_1sMP4YXXwuo@_R1*DGXfs5(mo3^woI@ zEaa{_Ck3|JXtfka5}u zjFw*!dBmj!xwa;!hVYXW@zLbRg{vzv#aKeL@m3UJ6Z!ph$4xg^4jnCRLJ8@GWKbtV zoI$mf&P^{0gq8`yyrKrQ3>+WdCy+{_frVY-VcRUKqfE(s@o&i$RoT)i7ttbHuSY5z zFS%GbujdOWU8rnS{khAzfPCM>)(WO0XC-JbohA}hb*|w$HBq+g;6g=RV5DpE zSf+H%uxo%WM*lEkcr)mnb0Rsi!VwIU2Qj9R$%%k8tjR}gnK~6|b`exxui|bFj1&(i z>#sQ%XH`dB$y8Bd*L=|9j%BD8ePdnWWBnqv@29vc-g}wpao_)hW0gR^qm$+e&CJ=L z@~nlcfECeelGU!xBbt&{+wS^!&Yzst9hi#kWHecl0w7O!CKn-0rIGJnpN0&;Sx(|a z>&0xOVF($-*l^cGmC@&Vq9DDjjDqO=`GLcAKL3VKx_G}8ftO8{_0NWu>3t4JYG+Vz z;T^K`_Y?rloh=@2J;fgjSE1ZSen%soQ=vBfHFe_cyp*I8LwRS%u<<3IvSf3@ ztYW%$`P;mIJL#5XX_$^w;^*F{-N&U3*2K|0n>f5CT;pj|Vi0@mVmvHi5Ua>SMl1NV z>9Uq(ury2anyH~aAe5r6dn7Eg)x}&;4Dpe$_&kR#c8Prr3FT3sGMmXms>$%^j@0L{ zj?SVr@@N-$L_XM177J*)uRI1KPc)O#qB)R?m&8|)OT89-Je>~wcn)dEN4(EQs4n^v z2G+PQfjYKIyiSV6m~&>1UX*r`rX6jNa~q1FO91XZ*b%yJM1%SP#ml#(BhoMM^1EUg z&DvWr1XcVOD=iL*YsxMPGtGoT^4*t1`;19Cb5=#QGrvinKuoXng3s^_m9YKrR2}6s 
zJ7*C>Iyxg99iP3k0=)x&TASWD@x>u)oF|@nPIpY+fX6`%E;{d&+e4^EH39&F2B!>*lU#v{27 z-i?RXUeU6m+BMI#%r=`u9o*N<3iBVEeNP;uKRYG9+mw)qUdNX20#O z6b)@-8mVa96T+Fwzy86x{;PRt8R<;k3K9Uof$V?2_W7?n&B)xq+05GJUmZP~Hg;HT z2;bLw0@ez`oRvoaQ+xJ6+Pw-# zPw&OQ>Kd`9FPNAPt&Me)n@o#1tvZe#r#4ou8VCUQc~J_ZEnPbo+A~ZPk>4Au zCpb;bhHz#MKY>`)j@SD9moC_qEglJV9J(g&(GCIwnGYoYd}45WetZv%s+n-}=--Ct zhkgH6mPXZFgwBC#CbxQsEWzCRR5?dkpz709tWQ5npb-I7)}*JoW)Z@h)X=Cc>O=yq z5VvFDz?V>?Doay@{%3^`yHfc<& zWtvh}`cshnl>He3V1QENPRm~l6yQA63N-8eg?buaC9HLPk6(Zs^L6tJOoNAgvrq9zfq7Z5yKNf zA(?}LEmNnWIN`C}OQSj8PKr=@Qh%ia4vJc_)_^7xXaq7p0wQw-4djmjfVyuZAHwqY z*)N7NfadqYi02GcbY@L7}v22&dpJyvSB}Eigipo-Bcpx8) zHzm3oB44mx*Og@>ASaAzn>>D-K-ac5e50QaNN)=CUgE+kFq2^N1P;sVKLvmnJ>!Z?Z(cX$NsR5|{o zYv&LgpitySn8Hu_Kx!tExD(Lm*mo)Vqi_2my>?nY=dVu9k9)`mX|GV{{{nZ}zCD|l z%*aMS{3+ogL(V5km(b6W3C7C9OpX&e8_|$A_jWxO_K`B=6nO zD4qCg8;RKYl1$b5w=)j)buC_KatJ??r$4qQR96`fP&FmZeU6n*=z;X+;mrGe zLVsoT3XV*vb834|@83AKBC_EU02xTJbbEu)0V zjyQkv#ECe2XRchiU?QAFz3q}hb|RipEYIXHaX|a!#^vt;9OUzKV_HT;&XvC5SpA515eP=}|7D#zNEfra* zp>K^R3(_%%$Ni^pCLaP%lrd73Kwc&`W=s+#L93O;nt?WE$HPh4zAENQ7k9Vk3W82| zM!*aT!@1Rf7jgr?>zxikPfcK?fB$9H`lTGG;=!rcG^C1qYT?{muQO%*GRQ(P=~F6| z^$DZe5gS@Hf1m9Q8>D2I@vFYoNWuFfG-Y?qO@34&T0xdT2dVJya+yL7T?qqnA}7}G zU#De>@>6*k=PB&V3NgrWg?_)|d8nc5Yipz*A z-ya6r1Am3F0q>vIv~oZI&&KY^o8b{DGNiwq8T7f`<(U&?wffCYnveiIdIp;xMVw6O zxj!x??p*rBn(%5KTRl0G^6IJ!NZM7|GrUHHBUE$qETo{oz8k=+X{WJxj)AfvyRL8F zoux%@CfRW4&~Mauhju34qcK0%-Y2-btns;z>G43%UWyiNr5Z9Qxsq%Ze_b8FAGw#- z6WYn8d=Au$T}X%QSk+>~10PKJSnglVz%#7EU&@7g46(um5X{LCUQsifZrUhlO5s45 z0yFYavZ;TpDST}tJO>G%pnPJ-D4e>@(}5G8e|OlQ_e}mR6Ag9@{`wL2k2BlAqnUdx zW1GWnQ`1;3dU6YI{MtlZ&#Krg^|&wzimCRcwq^Ybc`S*tW%nHEyZJWPP4H^MF*Py> zw9wuDt-7!H{M5ovmn8*Px)IiM&A0A*3i7xrnJV!`ciS6TuyjXTKTlg7`^Dez$&SDF z6&q`Q7!aJW!ls6kzjbfF^4gC)W~p3+nt2EGjonwN2f)x7HFLhc=F+ID767NZyJSknP0Z zG;}cgONZkRZu$rk32;Hk{Yp3Rf1Zo~gU~k2pY+jx5LyY@|35;rbNZL&dfkS&714WH z4FQ*CNps{9lBt&n2&bnUbAn^_7Q;{`pod}2iQSlSzL{lJ*AmQU%o6B;b~_Fq0uB1W zmMvvkTqRznmcGrcHWDOqHgMA*!I@X3f@-vahH4Uu_0Lf$l5a`yoJB)PMAWGY<-L~4 z_N1@t7f*Vp7vKBi>1re-_ox~&+rIgjts4kMGl7q=|Kye@iT= zM1DFleIuP|DN3u4u8JtDe(@`=oUsF3s8l^0mPTvLIuWiH7JX|M?PKx&EQM^V;)pMp zCH>Sfco)U~X;?2g1kjsr--L}0Lz<-mtsBjXnj(FgE|du*!(2RVM1;A0^=jpa=%B)r zorv<0R4EOi8~vTg?5$jP>E|YZVre_}lu#t?E9S-hx z!p*@4i&1fUX`6^vSygU@s+(+OBHF4x)s+B#HhG9grHJveWL+Z@OjP)x=KvzL`yewQ z5rrNSJ?1YwjqrzaD7_+%6G0vU30j*%d1PU{*OR@h5e+KlQWc0Ni-+S40BsL}*DiH+akOGzE2jq(qVGu|iE>D*=Ii82aonoP3D zL&&z<43RISO3+ZW_q(s2eb~$4%@griJH$ z+2-+dx4C(P9q$wU#r{RfATIo!H_BEiJuAbjdnDi!sk||cohI1{6vpy}CgnpON#(Sf zh6iv<69KTp6#x?AN4=JOrZ)?!3pOc7EXG#X@hc7-lgKJw@OnZQzQNiYTVNydY z-7Up;@4esE^9Yvm$>Ee*V^OWY+y3eH$)USnngOjfM>M1fjl49Sgr=Y6z4^h}QJ$cr zKi^T-p7G|6_QVV6dwYMyNB=w*GtG_7b&B!C<30CxPHr|(fA-+=n%8>Yv>U;K(}4Zx zLvc8p5$W$q+?9Ad*+`r7rpbz@%@!pvswyrsg9M})JJ?1}eIT6W)xO|Rw~i1Zz;aM{ zowH8z0_yEXAN=2zqvOAEt9@S{v?%9|_qES^L?=$)I03o^Y}g8g^r6JJ$7tP{U`JFI zTu|j0O6KS{G_F~Zj)im9PR;8EWH!7*2ZZL#4rK$F>5&g}EjCLEDe>_XG2loCsgt%5 z**ki_oLnr$r7K>p+)an8$s(a}93W)yr5i#ndl_(-9^on@A~&J%050h=x+;;B|LUnH zpW3K)!6mA#i6greR-9yz1{>SiRKJu4Y!f(q9Yd=X#aWhDWHYy3wLwGdU^MaLW#nGq z#aaLMB%pmG%yo%aBr_seM<*UjjP1gujWBhW*8XCSnr-dLkLNnXa4x2c!W- zM*C4KrsLrXM&k;fFa#~Xsb z>bXYUDI^A{O7lKH%hi<8itt3-m!)pwIfB-^_p%huq3z0o5Tq01Nl~UYjo0$0}`;ZE9@{C>IVW- z)YXDPj@PZXwl|g8U!uf;;P>9S!QxozR9xRtVR?6I(hgofrE6;Sw5aIOAT9hA&ICr5 z!N_QXxTXJ>u2^pc%`?@&2FN-C(u(G4sr&Du0ye$M!fLyuXWRA6vf@_m5uZmFqvdo) zk~Neeq6lt4RMreOk@K&e&eLq44r@GgF5VE&^b2;L%E2i;j57MB9eus_`R%io^+#Zl zC*gM&wdt0j-x@vh&U9B&5Je_l+$9Ry5;zBizqkaZIp58~oM-&t zp#%JXPB#U}6;$&EhVUYFEWMY9Ss7M7BFTODWG|m~t-$iP&4u{Pd~@bQ;$MdacYm($ 
z{akrEbx*Vvk;wc74g6;M!KXv(CZG4mobUT#-;aBG-?!!8-(Q!@+aK??+aQ-hNm$i@ zgH1>$Q&>beSbB2nL^9D#I}9%Z%mqi}C84r%dtBS*$2^@prG&(4cf6pO3hzRGxYQ3%e~HnoF%!cDfU8fw0SrBp^2Jg*n_o`n zolFU@#8*I+c)qfEbJNy0SX$?wSed-`^xfRyH4r@B!*{2vLW55#bzw1PqejzU{Z2t46nw?mievl0^lYEAYU`UbH73VE1FVmGJ0zJhwq{Q0Qeu+eox~Nh~7EXW-|;dhEWuRt34wCPz07qI;6UlHFI1LApREdyy;VSCN7R@ z^#R#;=>YEI><8dL{eIs=E_axvWn>wXbO?(wA3^p0s;b!V%C*V8e9`NzY*x_LRh*^EO^9mZnZaPBkIrB{_O z&nVxusb1chmYJm7d5o)-T2kxcsCk)qM4jGAVvxHRm{ff-H@714k`a<6O(pcu)(<4m zAalESB|Cb?J;rb6O>-74g@FciC{9?)wr;i0KPZxk7KcUSQI%Z&bp2SXr}P|cmc;VU zw81Zlqg{yz9Y_ZfBX-=r5Lb)KUr#yls5MjL2YRRB|`EIq}&qOSQXyP!&4Q0?gzggzTL< zNiN5}^dV8ABPLfdAs|J1vL5$am+wG-J#tPKKrF}97Nw_1ZXt30NSHCDl2oB+9d=}M zE}|T9f46d8$&v$8OF9M+2OR-p0Y;Elzr9;Xm$`h08Cdr(jEj`$+XL`i09HDCy;6*T~|Hr9aZ81+rs zV$EB&LUIR*Gl946tpA$Sfx+#|>koGBti zZ=yDhH?<#~Y(1LlBr4}uo7g$)ZC8~%xNC53!U&L+pExkLAF+BWsYDvmR*$0PP1E>P z6eMboxFaSoegrde^a-$0q(^@_v6PZvDERf5h!H6V8vH}XeM)f+{{)f8FG|p&($}06 z&LfY-DXOf*GZV~2_SnI!j`-RIDFLZV_u3s>W7QY7jCs(W98iDhLb!lhq+2hGKu@r) zAoOALWT8}LF=B}IL9(s0N7)Z)ER=<7_>W9K4#?`VX9I|Qws>R0<#c@I zZCFXTGiY=;zn~+NIfBXz1}!RrH%(}zCAkvRCoZQ=5^m{5i4FlLg+!e^A11Ii@9&iE zlQ~YvbPB#p1Sbc!8J^W^yVGNPcFD8Mul{1E4=Z#9T5v0{TrEl!&x!Dxi#Py@1g@jD z;N3F}yN&i+808e_i5?PC933q?&rvj{3Lg!4sIN^RDiAlv)Hutswh|gE-c}(zp_=&P z+}PFT@7$4l&5(A`gZw0D09U~5=|9R8i%6m3E(3&N>=0bE{pyKj3S)id)b=@Cr4wQx z`Ely|q6A42X*CVfHUjc&ri)?-8kmX=Ao^CECqBkdVRQ}H+T4TswDm{WnE&7`8yVnm z)4rdX(XLcPE$BYxoYk%fn&?}P9oC$5d>qKUm6c9!=JtF&VF)+bvV(nCtW(}$edzm zDAdfhxZH7N_hB)1j6_Z3>}GbbuCsp>waEj- ztpixG`1vHC%MH$_Yuo4SWWQ_&9U>oKj|qVLR56+yw@vlwZdP)Cv=E(+39@yry=|kr z?Ez^d9pTyTSj9Nh^-#F%&+AIOrC3k&XnK52Y%X!`UZO}t6CrYHB>E7H8}p9cF-)M@ z34H`C9m(>Br;~u~5gxkXZ2a-0=3JUHhBL7tU~gkmMvPV4)(H)HeMV;I+SQgq=n_^& zrJU*3lZ@l6D`>b7=#+l=90S-G8LviE@D!W9e{Vb%#O z9+)_-PikEDb|1e{7qj2jXT(*vt^_wuX%X!>0i}3Y@D?a&NWVgF{bJ$g@gnip$E&&T zmN0^-9(=DmVl_8V6HdQtW_PBaavH+ZXLp*sC3VAaiow(+)Lfcn3@*PHyNCCVi{p3L z{!A`B2AbRkKDm-s;!J(BgnrPc2^$c6<4+R)6}5`RI(Mthx^)IpYDK>lNx@vgvu^%B%xdnO3zKTfQZ1d#{p{^GhIr_ks7u zOSVX|-#88$0v&EXSG0^{KJ!o4z!R2KZVd z4LH!7vB!xyp&QV&@1$X4IsrqPK&8n+0VjdO@F8$R>QY?9(TVVMy;~NA8&0YqphY&& zqJY+gAPgXs%>vfp4FH@*wd&c*JX}nOUYDa0LW?IL-fCJcZniKV+c!;Z=zet87oZEA za7B1?5#8Ss$^*~50Ga!&X%LdUfK4Sm)owxliw0*5RyElgTSiB?iH@?ZeJdNe?EjAV ze6pUyIDZgd-p>i=|A6@Zm1t=5A4i-T>oz#Uh~CF)EJF+vtjWTp@%l!9V7Zh+21E)$ z;-nA|1}mADo0n!bX;*(+kX{niz(=&Y2>B5(!0yIoIcP zmX#J&8kuPC6zjDw#qX6Pw$+5p8=0x^6Hko^>pAPM>3lq0aBz9K`Q8>sPv;rACz1_a z1`>j{Nem{kN7Q3i9e)V4aiMXZ{n6CUsU7`^9|A4rFJ;mFu*9E`q~vg@1TQ{x<4MA* z#Hslanx^LBSLJ^ZXwY62u{iwhZz`Q^0|Fa^0R;+=;ij7Czrs<{_~^zvaOx8-lF1!B zz(S$xo9Yb@xy&LP-qR);DC$W*ts3f)$+am_c7o7yx>p{9F=bpZyaEVvze6XimN|*B znDL;WRL?-?p?fW$q85zmJB)q^N;dQ~3eE`pD}=Nkq?e4;fxfCd`)ZkU{}0kj`Qgtd z4;f`6MGF2wdZMa-bTN@xgaqj7a*3nYaR=S@RTqz;xu+2T2M5Pzarg3 zW7%6>n5j;rxYQH`Q#0)E$I$E&W~mKG;X&<@XH*%49uoA4M;j@j0bjRncb+x>89s6$HCzj@iat`SUkPKdypgT#h~ z#B2u~{GhG|*~Np@1jy!@jx2E`iXoj8z|imsP7~Q?Ko)z04cSncbxDUVIib&5Ud&$x zb%F2{xN6Yc^{p;50<#CR!g^)nS>LMsrHljw4Wb}vE>V&C_~h=x>i*nKD^?6IZ>WG}kBa6I-U48vq;Jj{B$p6)&h#4ZqVBqEeO7-^%3h4P>U zg8sb&t0<{@F?1=a9rGQan)f0QEmiuSGAz}!A3Iu<;y@Y!R}h>t_&7a#JTm1`e&A@D zz#Y6UCeprcygA$ai z^fX5el+1fkmOg9atQh-%U25P5?S=oKJ&8;E+MQ-5df}9%Q@}kx(fEmim^>ZQKdURE zuOMC!>?7vcf*tDlh)$aZ$Zn5s@&wUX5DU$aKC*trL6PTK7(gXb=S!V0oQ@W4g|Y!> zF3~uQCXR)wYouOO^+VC{)z10CznkeOPjuEZ)@{c_0xhX3%=Cgb0ZS9lGYX;8YvBxMX6>6&Sc`Hhed;T4u$bB%DNA-Xl;w5klrqT-iP!m=C%o! 
zA!rDqP_Dh6AM%FBT=K#ni$cVLLAU)|+zJ(unh0>c2)Nml?!g8?sy7uBTCbe!{oR?e z4xPRQZnBw|uD>HQ)V}Gr^0)=QX3*z)&c%L=_emk4_dEcznb5E%x?AV>$2}VNJD7S` zH~wZjaJLdbL5xw0$rJ0KP-|ub&0eqzFD`Us$9kapB79B8X=e=*?n2m!wE!pFjofz< z=tu7~UgI`qI5;V+l#?&WEea*`*uN1+K5z>Q}owK{yMdLW3ra-q~ zTYp-lfN})IC>obv(e0CVt+mq3R&a2H@Z2*bo zgHO5*aH)dM;v(;|P&_dpH;U52F0u_Ph%@Oma)c&?0h9gEhF~8^51sp?GrpW*ptH{V zah6-)Bzg?DgM%1%FQ5r%u17dxG6PirC$@)0#3?51T;t;-WXn}eo^}hX=TVbebOpc0 zAINj44E5?Tk*zyyYUERt7_1^`rS0_%tg|4kIBb@>i+DjU1&g7(fTM^(#+5<(TrYKF0Lwa`pu zju_|8Q)dySBdH^UL2a_rJT}kJf#^4%c(%mHzetM^uu| z;0lUxNcF@tiKf4q_UPYai%C7knmlK@3_NxwOu2iws|y;?9dbi4?_YFA8vDCi^g#Ety&8LNv&>X7KjZ!+mF7x=V7VESH3DBx^2T84JZYB7iM8aGxK!%ymhI$Zf#W+Y&E#{P(IZlbS{!>xcD9aA|NE+~isPpB=m0`*-A%AuJ~B&|Yw zL`ri|#_ckX67wB%%FT7&M)U+u^0to}%6fxBN1QpKjjhI|s!_DEK|51me!RCkUaZ_7 zSD`Z3#*x&-JlUdr1yy-U2^JoFM7KXAv)}Y#X?19YaWQF(@3<6*oaa@Al>>)G-y;djmkFQIL)Rc&~ICI7>hii7EvumTuih#>0+K>&2}3F*Kp#>&K`k6wzRw$)4k` z&b>zd=M&W5n;6X;P)-U16r@#{ngq=7Ak64nCML&`~7@ zfp19+Hkr*>7e=L*G(5s$NSDV#`NyKHjB@Rx8@heIPVl@rPEiUJ=ua1k)X6%ep%T=2z*xxW`f^02v4${!NJrYoqqDt_#%wPlOrdktQ zVF$<;F3U^0aXjZ0Qg`j95QiNy3sk$^rjUn@tc&#=cn7KBsCqDP6vPywYIeM>sJ|(Yi@~tjUr6+c4q}Nn%XG4QIfQ-ys#1S}F!CPt=CkVC*U_o;v;Qnx$z%u-hB2rI?thgrE(QvL&^6%GMK;ty z16l)qRXED~w-7uL?nS!7A!Z80ErwtM*tDFIf-XBqhxh?#=Q{GESOAmktF}!WnB}*H z^oP+{T1Yq~#5*J1ymGC{c@B`0j%0>pVi2b4Sy%?eD9~S@HUWVxKr`^)+#QblBXE=N z-*PDKaJ_|0lLN>tmKe$Y88nEW5GTSi2KVtJcTdIDbhNietagW$yF;MWc(eV>RpNky zb4>E8#1aH#T42;4VX=uqn&J0kI7CSTojT+-=|sZ=>k@zg z6&PGWsU2)$tu5#=tCqDfgNI6u782pA(1S`KeH>4#8wc&nJL?Ket6(`ZPVUKE6o`$7 zZez`o;$2e6S`L+78{Q1vLYXGkZ(?@#tpAJ|h|#j}eTtphU?@&U>^$*EzsQzOlCq{e z737xA`6@=1t}oLOq(|-*^(c+@M6Vl1z2hZt`oce<+eBjWsszznl2oyr2`t~fTjvGw zta&h$e|tC_mq{QN6OY}u+-2o{>ZY>y`Z-Y#uF9qr;#%ERUsP)^h*Lpwe0;F(PXA)} z<&4rR#bKT~C?H@eaJClFL{w=qMnbv2Vkn}22&H*)1j%&IUrAgLxJ!X{p zLWy47$->>C#1mrje5G3~Z>1`!B&roqNgLLDE%@h>0TA6TWO3u#BHX|ud8-6yUzn&QFmiV(hKj5nmrG&ic{PxCk66ty>b?= zF7kOX(`)+=pso3NSqi+lbg%58H9-5cfnn{q|JsK?7%<6o4`TtyOb3^cne17BG&d8# zV}iOOcRv%(o%w}G)UnaVvgHRv8A8anLRHV)x)U2fQK>(3n_2=k^cY^48b~Y;@TaP)~ zqt7Vz30)r~enehXuGT)5BqF-)rI*iFrXc#lF9`S$T%=b*hN+S?C{d1n0GX*3-B!y| z93Iy`9|AH!BqIx%<;$3nO{?JbfhQgw8RH)zc5xdkstK;^44a%YcV~|60zG2F-M(@e z@$Im~MJX^orzfky?Z3A-^0AGF3{;IJtcde21yaw}>J5J_l!9FFpC|1aYVxTWFWlPG zd%b{FSld5Roys)yuG-e`&R@gr-kaN@;BPP6ue*AL&Tf=cCc;d4ob%76Y!f)TBmbLNtzXWoiyX-7Ppq*==MlS0Gc zV&zkSzpkN6J>Hz}F5XMBkG)YD3`ML%=d==Td|NYuf+}$X;OHD)rBMQ_pE^s~p;HWW zi^j4cKBHkXlW96lzf>zktwr{mr35yGh_$7Z+Js@UU`87o<)~LSb!NFF`+(>;ipbV( zZZt7P58X@qZCc^LTmu$UW5Oc1@eig2FL!ueg@I;&a9^a0T-iLtN&2M8l@C~@C?V=h zhSWC|x}_P*8gFZ9bjWXUGzd0I^?ZRrnktvrwoyqvETLTkJ8^4BgJVj&w4P!h^=;yVKiUuyQ>ldf zMIt@jtR`TN5K%)m+(DyQdvOk9>MGW}syf8I>gCc@!EMPz%>xL?Mo{M=NR=%DDI`#v z6qcvsY1X`)$Fu=j^y}l5?a3RcR{Do~H4*v>%Xu}@n|6S~{HDQ&-s@M&UAHf2Z{o4+?ISMPu2Rd90p{t=$KVRB;bn3@M1ZuDv{RE56Cmt z+N@yd6fkb!djjAe@8a(Fml;(<7hyb-Qf*q(wwJF%5e7t-tlJzTy2cvM%2wjmb)E)0 z{(-}?j_X^Uv>kJYaYVTXaoK(N(+xVVIbkj7Y}f}eCmy#S=jEf;^?Q~@GV0GIpPV3= z%qZa^H04;h*`Uws&v#e<-wlvKn&-ltHgyF(g$+&NQb>!j! 
z5hv5Y1pmY2Errr2LlW6`K+~mbE7sKJXNGUWBPVS3UYXS-iyilg z-uYU2X~S1VY0mA^iy3EMr}9ZhQ$zw2v40zSdbaX-&*arQix ze^X9VRugZ;K)^|Bv|c=dJy}X2E|i!*O;p|9^W0se-{Y ze*X-P{JGJ8?*9|Y`yT@hZEZ}5{jW5@md0Mpjvht zBCt3E<18jpXu4zPNbiCEb3Wqvi77bCV#nwD@h_er{8#_tfBdF@`uTqU>*GJ-2eoyZ zbuok=DjX&iTndyF#2N|&$VfmIr$;az&H2p;^DZ4nay_p7;%cr=?_`_Xb5|yQzd#^> zh!${M`)Vqaxmh?oB}p0VYe7+DW1Bk4fmDj2JtC4dsJPnQ0_i2#@=ZHT2gNuQc)7gW zVj)DTmPuU|l(|K+aK;HH7;_P8v5d$xDb1hF0wzJJC8;h|R6q@9`0BeHwg}(vSdR^2 z39JJ)j=H~PrYH9v3YMg!^WzbhrDcBP^(~rGHBYc`(_v26W|bHuCf?Jc`WetesNI#P zzhYO9WP@mu5DZsfu%F%p{cZW~=HP)9Y2g|#b%WCwdh;ZQ9u0Jw+o;0cQzu>VMMMl`o| zs%HMFN?st}N?uGF=y^FCj$i0jr)3lw@GZ>VpaJ6Z)AQ~ZS8kAGP(hFt_ZJ}ncD6Dh zfNV4z=YDWDoaW5ERR+cpuaB@xtmFLz5B>9P)QC6Ld>{>XWCW&#j^GWIt}g8(Jp4nm z6=4b^t?3maVf*4%RTXdepEM^!Yx}bJQYU7puRlKfvD^WPw5H6Kh71<$*g@DW220b? zNs;D+8b!aReA-m&s>G&;_e<)fYVZ1(8tuD=lug{K5|xDk4$qFq(Q$cr!1(wpR373} z#4=K6w4mPCDHFgyY@AQk-M&dx?ry)aSx*s7e#^!4?0kDXhx!(l2_`&2#HDJ&MHT_4z~Esbxwsrv?X8)zxiS zWJ3w_d-+sbJqU;>R|u^PxC=dHX#Jt+>|AzVPe!8^JP4J5)wkQX$sLk>Tm#Yw^8R(i zhI`>$bA5`$Lo`BqV0c#6SNff^J`ORjcv$B$iyqH$;+`($^fhKQHXQURT5)mlR_0b2 ze636%ZceiU@m|nYv1R+m=_{ZA78&x2_1DY&SKFgghfIPAQ}L4m&ll5WtNnFZ1zkBd zcD@X=4@zU~Vta0oELuu69i4b~s;3;KA_G;03Y~J(`x^Z(b_y1Wg^1I{`&Q&pY_9ey zDWt_NJ6Op}trbrFrX5UNldo}UIQkM0n#W7sI(W101naXu;o(Bbwd)~f%qdgR74lIY zl^H#wPbB7AWd%%yMIzp{;31aQk<0SXMpK?rmVmvZBTc9t)}+dsvPQ@P0$nL6;2-}oElQ|TA0 zt(dO};zFZy^0|k^3jVmcEZ5|nVlO+OT|VECPWc)N1D)C;42Mu9A9c{*HWp_`2?p@< zsm|1`CeKm57h8Grjfl)(X+9Dv-y^70z*#t%cr7au9SDERx6h0Gn= z13Yc294o@6+UvwDA#qHmiL=53$3~91oxu?8HhjS7ysX7>?eHB)Y#$YzYA7VklW`#@R_cz5hyRx40C}$9e7#@wQ05! z=kzis_ zv5B<67CmP@^y(y2a$p|ADim!UvXzdp$v=tn($B~q(yW?VzF%}m()Di~4(q}2*6{lB;7gC2jfE#CXV3Yf`SmLZ=g(`lDw>_Wd(&Ch zw|*D!jyBT~!nVoj;h?eJ!2ss#6bJeKmh5e;IJ`S02gn9et31x&s6CrQcUPQ8$P-60ac&($Iou1F}AHdfL`A~8+ zxhv?-`llG?`1%fF%o-KhF5T(v93*M1IvvGILVXD%t1l^1w?GgIX5OcMX+|UiAx0&f zWp0z-k+8GeM+O{39>38sDfDc&Lc$aeU_;YL8mc(`oCv9o`Hb7*Ri>NWUCJbrhuU2E z9J%T7F6iu^MGILfnb-vS5kTRAluOdF2G0XpPF1xl5 zkSIV&6MxV$Xn+|z3(5ksX15v6slBAw+cS=N*h5i8fzkGdMsZmBD>GxHbwwLuwEVz2rpUjM{RKH0TFy&-pdaOpjx zDsjEr#t?I}e(Ktk$YZ306I8|%5wpMr*^G$Pz9mF2Qt-$uHaA#E=kFOv0K zO~Ogh(FE#@05PT+83&gqa@}smF(T6o0rN5sV2wbX` z-B1aFbX4~65>|4Z1N>G66y6oM`7;psMR*4?z|V$v*$;dNQI=TOBE*XiP8Yb6^}-L9 z@?)9IyvB=}n$8yOqL271JClP>H~PTSC8AI-9E@ zY|*l<)P2WjEXEPoehDVIS82Njh$DbXLP9BkI=qISW{vz#fu@GWl}d(811H?AIkdno zw>}zc;x^dTPjJ%TPyp=^n7>_sk%B6p2Xt958)Y?Dlpk*-}*-&)7Sk*^~5x$c_v%_mFc~U-cxHkx2 z%~f%8h>hHz?9KP*;{^hhiv_Ob{VahS>Q83tP-tIq2#O^`vFvL0`JW zYdE#EAtXsC{ALZfu7uQ0`G*~8@%kG=oox*pXiG|@k+Lb;NdMS@)Y>z&^Q%dP57&Zt zACP1Vug!4rnRA=Q)SYh!$#`v3yB{0SDS1g}JHGxzvtH}vzjbifS)=F1{j5?rNdMJl z^&cyhld-j(mA=!zYA@BKZ07|K{t4;w0xV$RCqP1?JOhLSE$sv>YkDHnIH=IMPKS_( z&m~d;Qic;eB*YU%DU+nshC)?-m_AtiClh#c#ePem;HM)`{BqOXi+S74Bm=$!i9-Zw zom5C3u;Ree|H7(J@tAA+4gJV`)HS?d6=kQ6M(TUgpEC)MgJqfnH04G}9Lh=ZWx>SV zB+$wdYQz&TC;)7S7~pqAgGWm*&JeVKz=+=}65W(ey2>9?BEffobsqtRxuO$^bLOj1 zCSdeCf=FfCEF|mv9k-KkPo!Wgb|*8VNP8!QwLk7SxYWGXSuZ8W;4pwg;(sT!(a|Uo zhX2L@32@Fe)Y#l)9ZR7sf#ydhe>*6Fw!Xs6Nz6 zm@>VoiyvNkA=Xoy{TMIw!Ax2NU`7NHouLxv0h}|{hd+CBBSrN!#sZ)&&VQ%b*zx} z`KJsrAYQ_$*c$VFO**f-igS)4!=|JmyzEN!K!asP*UeAIJRGP_x@7aJstBLiovr!s z7=^Vydaist;vq!*cg6b8q5?we5}V(>sUlbaCnf*~K~`R_nEI?Okk(nzkj$qC`W;?C zIH1+`H^dBCH5)r8ht5Cyx!WIY*DIf?9~X-?%)DQ;-`g~NU7MR+@!`+kEh_^hKVQd9 z4pj^v74BCh_iu-X`m?jUd2yIC z$WMez>Nn2Gt@XSQQ_LMbTyA%UXU#iz)|r?7jbsL;Ye&I$@oYH_oO)6HE-EoEc8M;+ zA}@L`YM(!_Z-~ho{j$;}^7A$Rl{q}|pj*$Cuk&!gj5Gmr8T$=+`TN(37;BbJ%Ji-g zRxO}1Tt~?lu+i6TmRkLKS0OwJ`y1cL^ay{E%*4=agaDnms2W zK^s0qquJn z&zHbF-rjFuUS>nHpHKd^W3s={1MZ1i06McSu2A8Wn8nsfYjP?;yF;67ivnc`Jj{mF 
zI94eMdae!j#n|z3{n1n~WQXv~1Ga~ym^E;&av^)0s|5K@ygd2V7}?? ztGWoU@J!;T*nl00<*zG2(a}A9V;EKu$d{3R0ohoEI{w1jS_|ZE%K+*}0t4qgt--%B$!zo~0v+9q z3Ry{_1wQhk5c()cub-2GL_n^T7U0*k$LE` zI{T}^qfr2y!(01t3Cf!prl!6*_%B^m^DFt3$Q|L{t`2k0s9gQlHzU4+FKMo-RQ;}| zU&Y_KGf@+wKyKarcRK%Q7V1VeKjicj6KD&_jvp79h6!X?P7<0ktR%CBax02@f^m8^ zvj$&3kbPlacQs9cpklrsh^?~Xf`}?aR@KcYI#Y^CMol3LsXbj17#!-dSkZZ=Wyw;T zVxzz-jJcEs9cx*bP~s7yN-fwPD878PTXq77clZISU`Mbm=6w6R~AHz;azt zFij&BlGwROP`U(yS_Cz8B^I9!$gh*mszGi-qk!WP735T+E?EAre{-b#^TehC7MS)U z0{~F}9t#e15#iZ{o-$7BTC>B=ubA1M-k&22?#CU5^ND9@K zA>K_j=4g+$jIPZ;b?0O=RLCzD zthV8RY`K*iqqbgI%tIMR#yzB^L|v?=t0TRJgyFM4Qy64E7&eLuE3QYcY%to(WRAF*84eu=-VFmEt0awnO`}r#Qxk`G2N>zSNXE&RQy=wOU~+UU z=WWFMgat43tVA>1;hg=3Z~mjhoxIuur&{==QjNJ@Oa4p7u|_yRwYUkTp@$O_IqM`X zwiR>FD8Osm5|MTZ`R;WgvarkyY;mx)HSJ8pQh|#6iD=Kkn6LnuBq`bkWR?6*Q!#MQ zx_#G15A#bonHS%?oElmxWhvz?lFoywG*Z3JqtwxHei8Gdw!*%)!A&1+ud(8nwk>$u zPGdkTV@1-4+Xas1b-D6@cSieMsMrWKG6%JCvq_g)el9B|Rg&YjFi?A^y7|m`?>S;nRR_;1^n6k#7#F=k$_O`3{^7@@kFgZD;RqY)_tOU-OVgHP7TD z)|Y3EPc3AV*bt5AbrH4-v2F56*;VCieb~YmUEfsK9Vb{I4t7bn3}4Ud6kFQ=)!11; zMb))?d;kgQ5)`Dn1d;92DMw#)zm2X;5(PTj)Fg$C2@>VWKN^wMF}_P#Cx50 z(nzpuap@z(>#B;*1i#bDoJmI1Z~KMIm&LM}1qC`|3?0vfy+v6xVxx<)_u0c8&g$SV zXI!jqJ;oM4+pG0+)zqvNPmA+C($ib5G|Ag}nx)hR50OJ`#+!^ULsUp2Fsm9uN*L5=i_L$`@?CJ&a>14QvHYZUPvRuTCT44^BO^Mgys=M& z)U5CmpR0ZL%dsd8>%{lZjXCt%Y?-~dJYEK*MW30+y7HxXUO^Thn{5?Ouu|Yia&y1R zeGza%B^BC!C10V2IyhX^et=0h~C!m zAta<~d*V$Vihqpc9@V;}zO{MQ7&u9og-ujm_T{3meYf6^T z;3)6sA2YxBg6;Wvc@=y{4VNm#uK4i`)B5ywY1X$_4ZS|Hne-DKNl32*7O7bDsOy!X zTwoKPlG`NA<&hxdEW2$Pt;+bc+He`yEy8uxx2jY6qsL^coLr&)ga?QD7v4eWps6dM zj|b#?HsS(oTX;8cvXzhPq!Pd9C@QA9XN?FPNwZj17QPwFTA5e_Pke(W9oHOPUYMTV zQXj>p9i2bar7#XQ$(bt*Wmk5U$&j6B-nQ_|uJ)V%*Wyo9`onFeD!`BGggh(?|-H4sZlTVi#+hoo67W;;3P=PT4 zo)~v%#>AMqpzlo$3bn_>;b39d5(dALRk27NFUL?0G2=)wjM! 
zB}ZA$pz+L(p)wtcYXVHA}>HeZ>Z6%-4CoKIa17_gchplL5C||J)>c!xGvl zY+^k!B!ZgJd|uAmV2li3z2P?(oE^YGdZIBd%r#CR7pW%_bpU+Lf)-7T7B)s@0#KbZ zsScyYrQ94_;x~vFM+Cm=4v1hG=6Xq7N%ussw^qG~6Cs6j^g(~FfE$KwL9hOx%R@KX z!`e+{O~^!Q*nTtByWkUZniCYL@`Y?V_)5c%yi z2=G|#_0nj_<7lpzDm^M81=#$gn#lF|V2^_2xpV1NJ$?Qe_zXB`iFWt9KO)&t+YG~o z1!=3M5J6(k3l2v{f7q5TSE z;=VO{7fs8Twn$iD8>mh_N21;V2-W8=n_KZ znZR3bk>jE6+lP-dr`Y1XxS(s+^z2;;fw5RwM|*L;CMDDRM2`oXZua*}!=z2$ky@r% z4!EjEK2y~HD$P_)uVC_+Hc7Wx>w^ACbFU#E5Hn>|75^zs1hb@|j}6Bl;b2@%9A>COO3)YIjHf2x z;=9B70<=SL8hh?Ugc5bW@)EZ;;)~jiTz+H(Z>b2sz;6ve4_Z)GE2Pc{7sz2SP1}vs z@8Ft3hL2QOpnqepr5NZVh8CMOsDh(48^ zONlMmApPA+sRTiG*huyiDFrU!H0t!dNn+Y~JnbYFZUHaaP zgDPk>bz9-{=P6E2^1GR+PArcosmSkQGC|;ZL_0<79!zM6Ugwy^MHj` zpU%2oavN`8!M6#?C?Cv3ZRJ+$Fyc%nML5aviXMfXr9tEwGQp|nH0*4R)B6j zw_mN(9?f9)GJWJ$Y1&d%sG+|L(@LqHwkOtZNI0&R@}anOZ`r%9#IMgXE|%~LUax#S z8?QKI6!#9~z$9_45%zo;@BO^zsgGoS2&b=P6Tf>Hn>{D%(GQNMXB63m>$YrW`9!M8 zX$0BRm1fT@tlkV)kCfo#6`U{tTO31oDh0Lds@AG1>x{%Fn98lyCKCk947XnRG&O>V z#ve%@`=+2P0_-Ya)2F$lLT@M5H@xiIgSRobW~G<( z@JrJoUt4XlQfDd3D2XBf2`)vFV$)#WkCI*eCAWL+Co@OVh0hAofOzX{`L+jLKMM;9 zlqxlA;`fLdr`CbCOtS-PEZT z+oup#=&{~$MiTi=M>!fl2Se1WUk2M3GP5(Tlw$=WC-(FH+@X$la9S*w1kLcA?=NRw zjADVVmVOX&Gn4QVYp1WG8yh2o^o3tqUd^qbA#)4YUw}YfKU*iEr`rubO?TcOVF_LJ zWnOSc9{SjyZKcj{%oDR73SFihUG8{tgIXneGb-H;SRzttX6x+TE`T2N6cLA44rf(I zm>`dX)eS<=)Qh#bm7?wP!nyn@ZCE~@$J?XM_IrbF zj;ow|SUx;^F76Ptx1S=0yH>t`@A_=a7uPZR^Lt+;MpMk?K%@c(h<9M{tVUS;>nE3; zMQ$wHmmd3*Shh_*vKw&rk(x)_1D9Q^A9 z6+xFXMWvUEj^*iBIt@9il+zH676MUWU8a89SE(&y5)a>-Cefm06%TWiate7!PS{r& z1P#@zq|Ex?9mae%OS5GCBBx{EiHDPqt6)5t`tz*leV<84q&|M(ntGNL?=(_sJr2yuumdp(#7N03e=KU zDh8=~u8{?6A7ih6W%p~eHoYv19(_eyAnq>lkmVt76G3;w6CH-bJoIJJ>gS@O8%BDO z93cL8fuc(Q2ekr&-`H*1C_0UJcd=KMqzl7qVH)} zM_!U=`O=#)d<{EX54DcIyJ&GM3k)dq)Ck`)A@Xpl5ei)LE|16NgYbKU2p~^E$jCu= zr=8z=fW7DCvK5lDu-)s~Ap2yVq6S-0du*vk67!FX!5mu&M*TV7`))Nmj2-79U>yOt z#Ufw%Y2Ca}&(#f1N0LG>?;#cG)M|92MY}c{%uUOEzOX(5d}kQg^jM_PGtTF9_>MK0UbMzYHjP_H! zJxyn331=4C+M6B9EJ%|;32bDv?p{B&pAxGbR<2|OY?S+37c&pglSIqX;2FqfpS3?x zC1&ULu7As%Jz1G-EqSG1YJA23BJdP-!HB`1Hhe~VS0h`!+KsqYEI&@R9%DOEXS&?Q zfzi5ZFV{S+b82E+Me9R~R^`@aM$Gn=j&(f`5@+LBMq;jaVR;G{KPgT=h+%ZT>s_v4 zPmXsD^9cF7ox|EOUEP=(ix;43J3&& z6c~uq8M^4o(8|gg=aYkxV{iG+X9X*%*FNtFjU+8pFoo)?5D@1{YQFreuspla#%xwL zVIhcmOp;%R2NCKxCa9Gpn5`uR8JsT(q!?Y?(urQ*0ZfI9w$p{)`O|?Ox9s2lyl1T; z4Ft*|wme3K>Ld{R^>X{i(An>%vmC=`>o8Cx=p9$WQ6XxPI-E7uT=+6FecH-N$mJMB zKxH*dsiQc>F{|2L_Li^Vz})9TbLVn*mYNn$Xybp*FbnD84NVk#lOo}B^YH(=U(ibQ zZBQq805ly304+ZFOT+eOznpCCKvt$M(8~8tZceiDPq}+~pk5-kZ0fIRTUfm>i4Xw* zTPX6D8um|`3Q$~8MWSK&X&^rXi3FpSJb^!A$Lh-xf+S`Km$ZXVG~;RC+-8j?0xyc0ACACEiR z-nDo!bw}z_+Qk%kz1$0*5Zxk2R3!}qhdEjguh=8Dn*mhozVEEGJU&{^8$S7ljxCX( z-IdKpc&`G@)D2kmF{V_%_A#luHs0K(BoZMpk6N3}7aP0)rg}S8Su(1W1Qa>?(^TX3 zEw)#ei@oB^ecY#9(;o-%!AuRA&jqI0DdX5{Wx#}2M19trm_xkzh;roam#(c1od|A3 zg~KbHK-47v^~H-UaaSUilm<-R;?B<;b^8{r%6$^$E5_)e7wV;zVV3cr6yz{u(NW9- z3z?qkQdfNqEtWNw_Ae9&cfD6RpTWUDR{Z+TK%nJv#59G~K!nCB73sqr<;;=2x@q8O zZ#1f-uua6BP>yA14ysiKv}xWlJUt%X&Ztc(Vs19X1G3FP+Zi1ONKF?q+rc-(8B+e) z&*iGcdq1izVhqg9!`Xv6cS{#Iw@N=@tMSYfh+MrhR%xd%NkWdQcoS7|-z78r%?N*( zxj$q06Hg|m#B$n`Pw!okxgQ=WwMoCiKFKu+B9$JWk=Nkj&&nK-sH(10nOD{$GvURn z5=o{J)Xi64i?pQZk`P@I3S|IH14Ir)NatRxxGB(XG?b2}Wt&{u)Xwi0)I4E$OVjD? 
z4n`j38D12WP0SWf0wHUhwe?I7>>7G%+mr-Mhz*2}2oytW*^JQ@DP72iOFe(nenNZK zHO53}Z_^{**UIlV@AjHQFM4TVHn`hA)x&6btEef!cuifKB{S z-g%7R6>D5G7t>E1E;xm@ zjk>KBPGokT0r@J*8w&?LgcG|)&m>KGP8){p_g=eEB*axd#MZBVaBlRxyxw{EXt`{8w)282&&e)4Y(G0#SgmUvq0a<1U;Y*X8*J-zjWyNFMzSF zgX!(e&D)e-Uqwj0VUzjYvJ8&?RYpJo6sg0?Fp_8q{wf65eYZ=Ot zjV#I^hprQdPyQ>$xMB4`7o*>L^q=}2W`@Ss(u~`fO(=#MaB0c{eGn`NRj>U%uyN=_ z{f4o#ursx?uz{t(YQV3T=U*+$PXG%2MKS+pJpb1u!LlCx0zrRg`FZ@ETsqQT7QbqrYF?O$QC{A{THIv240 z|HFm*_9*^l1AG%f6k5RLFS89B;jgB^x1;>cvF|3zIrQK6{}JV{{(ZNj!2Y$bOV9jj zS$<&dn<#%855aO^jfZYHi+^%%+YrHmVBJ1$Ks5q?bm+$W0~P~YlKKXtCHZ&Ee+pE? zVqj}A-e7VS|G?a;%m~YYt?+ZhNznR(bF1DDEa&<tj_!fMBohDo8MIDupn5$ z^9^Xk1s3#={23Mkt0laFG<(8AZYc|IW3G2huv(@YOpwn%m_PSV|Ep|*rNQbHq)$ literal 0 HcmV?d00001 diff --git a/dist/edg4llm-1.0.14.tar.gz b/dist/edg4llm-1.0.14.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..6138b743e25c5d217450f3eafce60d1218142186 GIT binary patch literal 31317 zcmc#)V{;}YAyUnx1n` z6Gp?p1j7WGg8@xV%vo)0Z0VTj8R?l=>0OMRK(2Z`o$)3T8Or-ERYIax5HZx$+F{*l z1QB6^NJ$eyU=(}4*I0>^QbC+DQneU+tyt|SL)O>M)oM4 zpU|+`Hmhq_Pgh_%X+yQD>|5oRBMn$kQ_<^zxE)>Kt%O`c2W}TO3{HBOHw`a==+|AY zX(3avP}Qg6Og<<#(+c3xL`hEqqjICyV+Ni%=PD9KJl-D zeXhOr6q*C_7v~P)R5#*{Al7ENFT?Qdk%6}h^O`zs4eOEX6PGzos;bLx;tLZ?!Cr8*ox*(ocvb+(5mYio9NJK+534Rm!3HodC!JGxt4 z@h`i54#{=QaQ||&t*G18FzlM(w%OTjt?7oIFbK7*uGSj#!q!yl>-6+4hp6vk>wLp~ z4Y1eI*VP%Tq#o&vajY=Z3=TzC%{3iIUsiQf{al*-c-Uy+ z?rQe2eV?TEbp^NT&>HyN?S3^~RrLn5sT1a}-nwF^J7!e9)!yrh4yYcYw708J*wx+C zt!X;Ww$7GJ_{(9hTcweZb2oxm3DMitJ83PX%wjBF00Z!~c2-jxr>~)}6Vc0lV?t8G zv`f=Drg>kay%s3_E^7v+d&akBq zWV52;p|(vcmrBQt%Qhx@w~|sFXR<2$=XNbRqIQA6Gr0XM$u#Se685$`I0qOc0qaY@ zEvc{adjnui?zf@}No%hk-HPjd-HVWf#5d~l!5cs8$nzo6S=v5-w~cyjm|okw^Gl%& zwA4bhj*7}>!N99~_H{i^DY=>Si+cz1h40eB4E>kX&^347?(d7}5$97N#0;q4&z+G< zY6Bifa1$#;dCw)HM8fS_xot8{!y)M&+9$2bc2oyWvPpn7o97 zo1g%8^AxNydLKt)=UBQ|FI0;Q{4X7Wl=J+pGo{bD{u3hkEqY*D4J3Zs>7akmFl8?k zMH9&Lt0_?nZgWMA%njcv^8J)i$~M?`Z|v>zgb2p4$WMNQS2q$0u7Ii zt9s-VXOb(5Zmqa8CW0UJBE7fQ#s<_J+jTE|oCR#0f!bGt3%ZX!$4R(tBz57ccGV%A zK_v}rX9HUlH^C-wKX2_Qx(QVwd~wqEEns>M-J+WpWE^;{2j1h?XnN~eH*OLGRbOSWMU&YETwT-XJGJy> zTd!Q+a2$fqKJz#(!izD825U=%fP4w(Ao#dj&blY93WbHqbbno5&mk9=u9}_F8|Y`~ zWz@v}Oe>d*ler`qzwaXPmI#eo>fo4BP>A2;vMNB(Ob&G zNU~7%9u#&7!93MSV_=Y7gb2{I9zs%EQxEUU&&tf!%gHz3>|^9`2YgL^eRVvtbeM^1 z#V#DcCU*LQY`c7~sV`$Z9~Yf^ciq_=nY-oA^ztlYL&#&bh6=DRilOi9k3O2u_BArfk6JqZzIN)LwsgoiKiW?XNkM)v@ij5Bw7qdl zzFB~nWZ55(Zy)_(}|UIkZIJdp1tbepr5gRitv`-Wb@)kwaue!U|Q2O zMkIeK5%zk5;EpteMM z%T!fjUOV@~dUY`FQh~dxE73b1w&SMEfmt_Euj|=KHkFjlsuHn!j2V=Zc*_FDoxIE3;z=a5)A%%zk6=UNkM= z?m*tqAC#M{pBdt}3U8au69ccs@-SHoB|97ZIaf<#F?ll&Yxr`t&U7{#Ex&w$;L482 zu8w3~e%dAKSH21k7B~#LAOwB)%O(x>EJLn0@&VL%QcEp8vEnM>FH|18B8p zmo%qS7*wh6R=eCF3}zzWyAVOEt{cYL!&@~#3iC`puIy)u9$lDk65_y!UYzl2Zx`LPWCId{g zKz=Uov^=_Gi@AJVvS_jnY+mFhh@3&SZnw+f=H0u zC+?vY>=|6SpGn~zpcas}9clY0uqtb=if;Xb&GH!XAvrD<%)Ri9i0u;eJy9q%Io1dh zl^#$ZSl&cz-94avWHxO=%!$Kfk;02p$5Mq5^oGPm`QT(5z6n*Jin9Z`{MbuBG@7Ax z*?I8G(#zZWQTBHTk~WM`m!u&Qgdx1QT(?O}J6bFEO|~M}zri_M!MG0j@so{L&p+&9 zmZZ0Q8~BW>l-f6;^KRO^zQ28`C*G0SBQnXSzR1x=N1*ISZq}PjLfcP4&kGEg8b1I+ zV29!0zTp>fFM`FOja;xhXQ8jg1B}hFs9h4{X%9guGMCUERqdzOubyLuQ05KSuGL?? 
z(ZG?kx`D>VLb(nId^cn|wEFVEx8{`yeDs}avl>`$r#7|K62ayA%N`PX{}CT5@lq)( z^^=C@9|fQoiyK(Uke5R8STPt<;=*9#NRV*q;BE-^J)io*pV$xNNW>6;#ST&^s@V8K zbojMMIz#3;aVc`Wq$$HGcU<<%2y4`4+fK(6krO9q^WOl3<-81R(GMTQQvkSC4dxj&A=f z_pM;z(W_G3a+m~n0NX-emLq0NmBJV;GSohj^J`xBkyl^)kR9X!W1K-$H++0|RcMNtOzR+Q^}i67O{I zJ$Tma=LirE?T4&N%J4Knv!P*}%4@El+J`Zz$>z-=FM5?F^J=Qv==HK2QBI=^C3FlW zC>Ai;z31H5Y63+0oKRl&`|v$__;vJ~l*F19L}6XD1A;(&#f0AuNE33cG->B*1*`6Y zWAeGW>ZQuNps4*mK`>(2ZzSYe)9S%F$F4f<`kx8)qe;qK+|BvbFq@7>$bI>ce=y> zDFLrD_Skmlu5fHuSBK_v?`*|RfW$G#Jgi<95$V@Ac)^}QIX@F;qEXT~1Cv^_2B{P4 zG=s>3u%!_N8>T8mf*3rmRafd%TV7sQ%t82}10NSLamPUA4$j@yJ*s78EK_EHOK|;) zM!ZK^bgylba$ryXdmW4-eGdd0zSS|GsRJXP5Y!RUYC$--u(Z;4;$(r=WX>DO=h9{0 zCu>|ze{-Z~nA0EE3&42=O}vkj$DaxVc|TjIATP0(_qHGX?2W&1T*y^ESK9e(3$C*` zTMu6S`RhudnQ~v1)c2Ly35$9WoZjq_Ac#7{SbJvzgh5g;2K4ac$&IVeg2T&kzTwrMvw&q6Nf;!A&l_HzG zb)`6c2xoWW_36_g9K5DJ>v&M%{n!Cj=C<&n++orjzBtxOyoHcBQP=gV^d5I`Ku{2? zBF$u}JB^UsmZ3MRIPsh?0MmoS0lnn(d~k+mmreV}AkAE|ZgJ}8kEP2-Xb_cwLv0vX z%>+R{-RiqG&hRc|JnnFpExQOf#+p=cT6r&CUouE)?8qCyr2+76)z zbt_H>CD4HeI{WH|E_y8JuHRDV$q5f@{4mBS4u3XA$+u7Zn&_8?! zb|Vqk)#}J7Q~j|w5Nrhr-Mk&|s(vZ^@>>c3_6d4>mh|S281ZoR4fOf=e9iy5_8zu$TYc|Cy!`QK0J3jQy+GtiQJq}%a%zIXb1c=^-H{@-uk{wiYBoXmU2ze!fR-euBQPoIpupNU#k?q1c-CE2z3mB5Y9kVdx&zd?5mINNK@~m+b-zRF6F@#%u zx8-#2^&O%AQEManL8@`Jr!p{89Vwpy0PBl z?{Ysu_+`m|yFOWAMo!Zt-A1Gs70hy{4lg$mv+<9V>R}DbjVaQh#~SNPYh`J3Y7p-E z613GNDQ~9Uk!XwQ+S^5rbyg0H$>Pb-E@uKdWSM~hlXQQ zfhQK_`Pf$84=Tc;fdxj-K}`o2ZommmC@QX`@=s}rTTJSv?+^uz&>00Xni`Fb=-|&t zcF|j$5}YmGDPTN&8eV|weREm39;=-x*HXwGT__5Pd-=Iyn@?*%5DWeFhoYOS+(LuS zq+O+wE&(e%6=ggLiD~Q>iRznsNkW3jpCj69;e{=+mYEz2YlK|RQpfov*7B>$8k8*8 zGF2ECkp|JE6eqZKjhulB$w|-UBsp45$J|B0F?hxAuh%CwywGGJ1`<~IuUCEaAl0YH z%|j)#g?rULiMlx|6(WRjRhJi=Rj1t$Xf?awF)bo&;W%F~sH}~ry8UHz{C7iImjf0G z#abdqeHR|OW&)kP5_8S@X};PcsIKXoen}KHF$dx6g&bc`-Iahb?$Fhef9b=q3^ z$56rpeLJ=clD@3aA&6VSROaCbLGT)&GtbimB&u ze_+4

PvH=6aFdyJe&0r)47I>Z0pEaVH;PV5>AJNPR0xn#;1LkD#Y$mq{a|9cepi zG9Q8Rez`JV*RjAw4(F0;cUVyFk2XzS^{{lXlZq&}1i0hlcicam%K^rc5zNqg%&2qm8Q`*9}qfc!Fz~XmJVj zafIO>958s11))e+I3*jqk9gwrgfV+aZe_@G{^WwWS0U9rAj+enFtufFAE%AyNM~aG(x;Nu-5?HdxGOrA@S|!I4RT`SWWd#6qbL%8Nig`0!}tI)LSq(kKgNffvKl`52s}7 zKu3xU?ulpr_Q&0h%=L#ajXa?AyZ_{AUuNif$G`5RKRr<40)_M9i=!uD9%xo(eu_Ey zxGUoZpuv*y1kk(!HN1fNdKp{-Iyb;$fWoIh&=JHQOUyyBHR#YQaDC)`Wa{1C&CIWG z3#f1`admGr)gzM>D0;ZJD5K9)(d5by2Be1uUj#2*S3uc04*&f8DHh~Ea2R3p7;ff> zYAo&%YcUxbfRCAeL$S`{K0hU9JfhyS=e2eG?IZ>=f>A#Elt zqZV6;WQ%%50xnYF#Ii9G+-(+(WuMny}CbC}EAKt3CKWF7cYgGA#` zqf$tAKFC|=ZY~LFWnN5|vw%W}3>=w?G4glSm1T&fAL~|?sNj#(C@=Ud@(E!<_J4Xd z6_J;Vm!@_3&_WF9Rsz{9+be0!wFkh?&d?EI@+gYDGaYWaZVCb!rOzr4r4)Ou^3>!s z>Zn<9DFi_XIL7atYngR$Tm*uJe-O+jt2NwJxvKxD!tqa)!Sql6+n{Y-q!O_BO2wb? z(P()~-l-C^DhBz&(HRDSL-V}-u){4gK-%6wJ>E^bOn7oa-)?XlybC4|PkXjL0~oQo z@*!;LUJn}EX&x`^<6Mqja&Sm?v%4Q982ZZaR90rkVRdPVtNQyva+$G32{&E0@Fkx^ zIws584+cWt(%wV(XLMLPiNCO$Nqw zxHwna)7H93JN@oUO@13CMpQjqkf2c6a#%rAVq83-?Gpam_$$VO`89C9{DEIKyTt~N z?r$Db(fStjN(OYNVmHCbPoi&`=wGRkZ=}ZXt!qXlaw=8YcTREe0g0c1qod@&N=B;T z?c6`EZOVNf-pV<}`5*MAW-EeWsKHec7ODFt_i7Waz%onaB=9JwOGG))R-d7s)X^-2&4{qsj zhm`eaaCd3&4Wt3K>#&7a6vFG~vG6V@-LYe{JZ{TCX}d+{wtL^(JY17kyUWM|!sz0D z`8;x-mG60knPO`EqPJ*|Y>ZWzQ05dP`b8DcR&fHhLi?pkoE6<>h~yIfm?NfWX$?J( z^F5v?g;L52j3A3$XE8(D^Bnn`(sF!)55c6In?O-&r^EAi>FZ=i#U3KUtT!C5-s{dI zRpevx9j=V>CRmOPixXog3mlfTzm_MH+3yY#WLYc^sWcZ`AvvG0a4n<7%%Fw5T;bD& zzG?=iUN&zs@i)(n4$3xfXR4H(G3pY8+m-O`E>DT#&I<%(=$hSym|7d~wKEiBXOa{c zc{^*d+l)`)Plwas>E7s%kHy-i_ypZ+8~l`MI;bD@U7#QIqpd9y#Y87rLz>vc<&iLF z>zg-9vFZ40J{P(nuDS$Lm=F|r4m@@|qR7_9duxHbmw#Fg=U%@Oh}|(_eO26aZq=yU z7~h;JuAqprl*uu}tqHI`UR<0zXUwt|-NWjd&QEaR-)9cy#?DWk{Wje{aZ*!LelXcp zv^||D^09NbXK+YzBDW^r(O0Wnky?0=K!B#ac7D@C;jA5l+PregoJhT`Y^mFma<@6q1YsjdO*CWK7PTROElgB+_@n_Z3^znwva)=W!h#~L^ zvCApIw6W}23Y5Wr^Wd&GtW=o`5R+jD1w<>ENLco%o`{|Gj;8QROs`f(O7>`|h*Fmo zKAx~86yn@Q&$~m8#8q8iNZYxg#Z@0jf8IBwY_VdVC_Qft%M=pvcxJ!EeHST1)C9#V z`;0m(ximR;NVC1F6rN1ZP+i*US=M2c+6X%xl$>`|$6H0vI$ovy=ig&Vgb8gGuza45YoG@4geWlQOJE!0zLt zb187u@@vnG7Oz{UfZ53Dbes>w7lWj*j!MHp6{GboAelU1&yzsC$J@UkbPjS#zT-vp$x&A(FeRHBYP#2n+LFK=ZOb z?_Rv2_EzvJs>3}2y%P5tST>|N3NC$!s5h;>!4CJxBpKpd96KrGxAz|0SPpau&9C@P zcimd=G5@Z3gyG)um6Fj&UZOQ~FXiUfXgo#7f%XlnDABY z^CY;vg)$rUeYLb8B-!B5m<-=lw5j;KEMMwi`Sb5u=iK{-I;QGQfHc`5gmLY5>yF~R zFnM2*D1l0XTx4lG_-g-@OFpx#9%q1|N(I(J`G|gE8CEjM@H@j{LP|qaWrtPfBI_`& znFK@o5UdnbF4Aldnu||j4QxD>CA+=)yn?oXlZ;VEc`*tmT0+jIA0D@(BF zbG+F!7vwAqgM44hi6`>tf(|({??pnZnH+0ud)W>N%A|_K^CqnnPFQQ%SvyO=Upvk8 z+=!QD>qaEj34p4!h5bFYpk^>R!`G63ZKag_YoXUBBZ7IQtTvnfiy6!K zB-$#gsv(L?v9**Y=I(e8zfEs!qS>u7!v|!$bxljzSTV~bZdSA(s+-ANc(&?R+qgZ=62EGjxiM9f!6hW`K}>^j=q8 z#AjgBUQKw=Zb198D7!Frm)n+5(J#-p5Ze=1k-|NwZ+kDJaQ;`kg=GP2d+p(&MmuLt zZE{t~mIlkEFbM|Pr5OLQQ`RgUU+wWH;8aALiFU{&Z#yjzIV~d-$a1N_HYwYr)SefK zkpM8oT*Q0RiOO0h&cf<_Q%m(oaXm@i*iS0Nm&Xy2nf|MrYVS3m&#AR6z__)%pA!@f zCYo1gnoGOZR)yKeW5wU*a~XA*I{Gv-5K_8^GCir>skq#jLO0h#OLqN{UehE7msw*Y zFVck3XlEY*$WTPL*G@X5@@5{bvK_OEwTH!z`aRXfB4rEfoZxj71)!JrUgY zC>5Ul2J;Rt5eJeIv8$D(c}ODA48xTS9Z^&Pho>7}0^B@i9kFFCT!a+~c6MCXoDi`( z@i)uEa;&)3c+;TYQD<(Tmu~S;gi@i=%R-GlFWG?~in!NWBmy57Ov&#dW#n)*Jh1mf z>^@y0q|49-2}?G}lL8p?#ELy?i!sHlJ)?$BoZCT8Q*jqduIi>wXp87)rqZ&nf=d&n zBW-!RN>c`#FOfo|CBtlvToav^(YbxuZ zp?BNxjX$51MCI=}nCRc4LQHh%3rME6#cI0NU)ADhAmOI9V*HRHv2-`04i17UEa9qD< z6GPPMXAg5?YUHAZFm_rapch2cI;6acB`ZhC&&35*K)4us4?uWwPzR|X=QzjbX6R~; z#^|`QsZc?i);Z$Fk7IoHq0ef%PqCO+(pNohrd)#;Xi#GyR~&gwtmxWjq2L>Y?c{0b z?Hva*o7$D{{K_`I@huDUn+(oFh*q1ia{c*tS;n%5I|TAFLA_cjt-4sJDb8%3y57n% zt;T4j2ED~0HnhChbQ-az`9?9@c}ecCXbw(Dn-bHmk@vTtZKUl?Y*23e3BvD;UoQ1) 
ztywmnX7h1FnN78hIm`0ZqnWCB+|S0Pj!si}$Sh@jztX@-vfU$G$+Q*&o1>$fzN{V9 zaAQW+*DvN8Y=8cE5Esgp`(SL+TS`{gOq!AwvXG>xjV3K4Y$-)a2U}M_CZw__qa$o7Lum`k zu#nat3nPVEBJ^J0vLrQH(tYBzvw(>Fa5|;=Pr)L+h~Ayb_WO>S4jQ}(b9=GYRGWVx zZr#PZDQgM8t5ipJ;r>VY2{yGQV47}mYtXvEvN)4^{IZ>Fnq@toMdYagL6*!F*$!Ks zKr3;HE@bdGC6$?4XS)60X0!?Z^%lq`oJIS#)D}h|zVxlN=q&_-jq2gGokFxjdt3r2 z6Q17!mJ=mavzk5?$TeHaQNq%ivCeROcBzNC{$Iw%tH`0u)nY`5Yh}_>T^`pZ-#=oakogOOAFIveO z2dVJrfCZNjlM;}ilZiCj)r#&5ysPz1uAGr$JAVD!P3?7>gxtqO_1L%b!&IOOm$4;; zW>ZULCp~9i4aC7!=amZ#k|Lmyb`{P-0?v)iM6qGtfJDUbe0V zMcX*kb?q}CGtiIJNHDcXi=TB?S7G;uAn2&nL%SC{zgo zYSF#tP#M~xKb)*MDkyCgaluw8LgP_!OuORjd!nR$zqHvY1gbS;T$nBATEimyFaqxp zrls&MH4LwHcgIO03DI2{_^dxCkwr^IraWh~+VM`~9?kLyA7*eDq{qjMrizbEtSi?d zFVrve({zx;Q-^8IdPaPUKEYE3By=Q+H`D3BI$RMEZ3`U&vCH0!kIG|I%H*?ko+rpc zbSJoaXzUgNC)5d_JbHN@Ne(@P*nj4aM4sGf5lM8+p2<;Blop#h@piu=*l#@* zV(0bGKjlrVn6yvfQEMGKd@70K&}Al9n1s-s{ozH<2IX)xC-fU2BwSH?qWne1VEXPs zLqQrAmNYdIKc%~Pdbk1W0cMa9wrt}RWm+m@t85zL#5D@@uPPrxY}5 zA+vgA3SO2EE~DX3);oe}hoWK1Wo`ZDpuGQ+JbW6DaJtw-govA_(=$_NHRdQPcg-NE zPG_!M9mU7diOMBaYqACVUkmGS$>p+(s@_hwq4Yrj2S<7va609sPo)7!ck$9U5}J8F za`N^5pRjZR=8dWqNQI5Bkoylm($%DF1ZFk{tN;(Tz7EXSge`$IxBmZ7pQu*>7WgLT z#p7=UDj?fC!?d16A0&<=FgYFA#PaxfI{BHe3}iS4!p{SDDW8GL0Gapr8~V*LZe%=d zV1BM1Cx^iWu=HPA4E*QjuTTH(|Mu9w|NKv1dOGbhP68L-r0)9;+B^#UMsgO|H5rLrFJO@P2?#ikE6l>ivO)%>ndFOV zqwic0Qtp_U%BeHJLRlEdV?V6OT|Bf3C|NpaOFrv@gq66v>t6I8B2uO37y2o@h@lWm z;D=m;<06vmU5q6}?5S0&U8G&cu?-%-F^TkcD3F3=yo*21G*yU6Zq&mL;#o2=$we0i z@Il|UF$c2MApExL^gleT=;9JB$mJeTHxFMZpq18k1?E82sIMDt7C3mzpyOTlRQ7@! zn7thK3}uLnFM&v@`by|VaF@w}F;cbVm zUQx0w z6Z$qXe6q-5v=V79mbi#{=3UK?q0m?~7Ry$`Bug*hO5OTQFLGX}$8AFauDu&ftpR3_ z^~chDS!H_{BSjwM7sw%eQx$G}$sNXpS8h=72?QMs)@rmS1i5!UyP{jEttBWn8(RKgEROe#vhR^5*sDPjk({bS-Y#gFed)IQVa=hWL7(GW zc9|AT%y4tH*<0>L=wZQRH8i|dIno>Ue3WQRf9vFM+q0t&EVQ-N0VnT zYAZ5dF!rZ?rp8s^xkxw+TB9n%$Yo=-Lx&V?zBB%I5*5D31VG<#-9bAfB?pejPeR#= z=d7s*co$@wV?Nx8z^3&1hJA z5D%4W)c9zsA$?2m2%`^sn4?8!!uUA*-V6wu9+gS$`$j_-V=nwg+#yFln~E5-*m z!}aQmr7eHooya1IA}qx)-v;*U{-LJS9>HAyp>@>?z2(Q_TGBDZ2c~44Hm_v#bnfjF zWv_x}WWu9am2Pw+_@syQDHJ}+GDT-B1F0b6;n24F+l}xIqM|#TjkPLtAclp>GI7uCt}VyHy%Ug34RO(*~v? 
z&&7v29dD)Jp83#njJ>*Mo*qLvFk*YU)@)_>NOddQyHXLj=lWhfaI$cMNxbZOkD6xP zh`1}(H~td1+54AK5{D73GBB-w$ZRz{jECL8EDiLES?)D3UDiqK{XQOWe@F<}95eXb zIIIJnoGSpX))oBUpUnF_zq$nczxKlw{6AwUZod_L^M{TyQk2*SoNFtPCGATzADhq> z9E7&#FO?+jnqe%H#NQ56I@cp)r7}2sTYsHxf{RZNuD;LKm>UmiShF}1yDHN>aIL&( zTs^j~dMa`pV0X?jb4ZsgE>Aj{v#!|)|83(i|2xS4^(gmNCetKj!QsL=Zj7z5t5 zm6^bfTjyeKq&$kc8c4~j$LKl&8PP`%pGKQG*=tD@KQ=#dG9QaNH=7_FDu`qV<{D%2h{v%7;*d zO$fgNFUU3M3@)4ZhJ{l|Lbt9R)FdO%$O$Ys4Fcz~g*#T8VL9J}X`+1NrZLjTk&aq_ z9_1b=u&&V3m(VeJ7iK+IJpW42H$cKld)m+m)njz$jU8UiftIBQ!YEMKl})hUdiCT* z<^APCbA^x^)HdSjQXJDu`UM2^2|Wk^z6}Hbx4V81$^QQTsP?b1UEj-fLch1SI>31` za^L%WxWVVykb#!7Z)?&@9;@XFc?7~P>;8bGkwH=dMfp8S7p>ck!xz*wrVdPOG8j; zGj?Kb+@3Y6-w+!wqn?4tImD4zgeK|%_s&8D6Q!RP9Fz?f(Y#+1Yof8_PV=XvcAF)L z#)@Yz1X5p6O`{AH0$-3Jd9^V?GUVyGcSN1gCw(U6;+v)RPYF2Qbl7ys>&Ix^YJFDw zfL3Ze@WFikLg?cG(sZ5~ zEA4&flm@vmZRR5%Rr996#P}aDqEu6;>*iz$PtS}r>?1?%*kmuz0BO){*^?uUPEwF} zY&J{}!s;AF@dq3j`JoD50mDYFJw%4WZgxq7on)v}+M=y}5eaSP!DI5ZFoYz}KPbqg zhS1o9<(OxFanR3gtAt#ybWb3^uPZgX?M^uZ?^-?f7G%LEW&0nRtPUdys-+O~G;Qor zr_z|(EL_S{Wi^<$9sX1u(fNz}!Rt}79|!vBc*mqs^>}{xNEOTYuBD3XSQx&L#H#7G zW8PV**M=O{^Y`WP5gZ?Ri>VYpOO2}-Me}Lhz)e{RL>{QzSM@kOYYzn85_*84BxJRv zCi!YM&{lHRv^Xg%YrYNss7@_SarPWY{&y~c_Le|CqtDx`!K;5HtAZsE-wddhb-KpB zhYIL}KQ%LBdD#_9fGz!gg9n;`Z+S>_@j_$RdpQL(JuV zRcj9))my#!ZOys=dq})fQYbja&3iD-BoqvFgU5GK4E-|DIX+uZtu)jx3^Fx~Kg>tI zxmt|kygSRh`>F$iJStz~*@Qkw&&^4I45?3Yf~;@ z*854D>l5wgFN7^3Dt)}!&7&y=Sduj`=za$A5`)-GE$j)7xacmj@5oU&V{_u2LZPd1 z{`;pp7kPx&g}?CHlMR}lqrYp-@pNgt9*D~p@85irdn1lP6<>pkNpk3*6Js^v58GpS z#$aZTRm*dmZ=IB;C0_Jbc)P-e<;EXX;2Kw)WKK&yH2J58=l@9rEaxE9KKUpf2K}n> z@|sxcJo(ahX2RMY{dSY(16vUc#~vLBHpr^#SRA0!?+3@;+kLrFUr*D=Z@EZ%^}*^+ z7Y^T_c=PhB;+wRsFeS3n$JVt}9O4dcs-dXx^5U;Hu1MP(lY&IxUb`ngn6lM>_w<3$ z*_~7PGzRyd=W-K|s?=FkDX?P9a0|fC8bv#Hy7&P|c?8mLe2TZ;6`RP61h`ZF8@j25@EpKihiA6v~*Bq=uw8lQ-M>J-{6Z+DZo0*a(5Z)W(Igi0zE+A;ycbC z$pFQ}{s;s9l%!`;`vNCrWmwK{@u8t;!25-L0e$hev2;it?LEQ{4T;FNywM?MP^<&- zxF@4cvg607m%NuU7++@MdhY>9Q?JWysBhXtBi(5wj`6=}b(If7>$XIR(xc=9bvUJ& z`J3qSL#K+U*Y6{yZFdvSF;jV?TLPjsFV9MW>&HLkF}9O*LKz0Jm5f1j(&MAuEE3D{ zupS*S)UlBt6Mi6J_lr!f2mPhJ?6+;4Nz)-|m&8)>GW4|(IN9dv4UXL7 zKQwV&*lA;N4%wG1#)23RpIXiQR{TeLzV$S1!V9I-x4UA>Na_n*;!o=Fm%(X!qP|dlds5v1Pfs3R zfh@=0z=|y(!^JBw8lGh_ditFM6Y~U9Ci9M}7kFwE^+(?Bn}MDdV+6=%3R(knemph& zW=MGj{(FB@fMpRt>chVD2ZMCr1CCD)<*meszt$1(>S?g!91W=U?^6y)f9U)2bbKJ3 z=UzQyKIO#iKl>uo`nr8O`1<{?VDu3BcH1cIlUTiGvM3Q2_s31cvW&*USqp?%@m2>| zK>s{9Gie{-9LdK+>0w@8Z#Wh|$yVf8yx&h!RkQT68M(XPwD)h#Z6gEk8GFm{Kn0U{ z9I9Yg2DHcdMiV{f07!3Sw>KI$%|!+2ulUiBap}gyQP(qvIU|j>R`zD|6b%uOqc_Bj zhf$m)^H2)sa!HqPd9X=)WhOMqtE_i9W$8;xF}yG-MsE3%aQjo|KgdF{^NDB3ux_cj zN^9a}r?%iVFbN}RXYxhzzUwy$WZXm;`x2ts7BiAR>pUTXJU z!dbsWncg%tIRqPe4Fdft%plq9r40JJB9h>;Wr-^S_w0`{gAyk#tXE1`P|l8$!}Fm` zr;_4padhucV0J7leE{{vBA-o~YYefTWLYj#nXe>|G2=jjS)(pfNg*-8uh_!ip-tSo zvv_8WbX63mX`LZF;xi#Mcd4XxpPGoqk~3o|%i-{*l2}`h-lDCQ^w~hh7buIP(3MS$QmaD1_gL&JUMw zBezF1&`WS z?*2zl1D2`&BhQNTl4Qt(rUrJ$P$BLwT3WrxkO`^7IbzgkFP0rEbV7nrbZBRuKaOFj zl)?RMo~*+aD_t&ITSU6qc@Zj@f50nFz@KwGvB>`#ZfxrLdDO-abnbqIvcZB~^l6PRqJz+Kbuf?6F=atcmm&xVjREB>w zy~?L2`9Ibgm_mHFBQPCB@D4Ao)CFPdH{^5h82lqRXa1>3zeL-R)^QvNoAvhRXnNbT zNPB+l2J3nLcwOhlJDgxK3wCk#Qnx8UllfirN2^SIZ$0(dI@wEuS-i4Ym7JkP`ES6~ z0fPTXO9ZD{R<&YvqO!lgXCyT6E6OlHl0XWRsHPp?WXLQq?5n zrw(%QWf2bOa~Zjg34FLb=n@4vlww&fk}1xP3Le?bp%_!Qu8if7G>$Ymi@Y`TL&yk5 zErt8@LZ^a~zcOK}T)J==+{j$#10E&&adPTn!OI&7qEue&LUe=uF7)QtP*9yI*k`5>oB;@aH@Kh_u{m4CAkb9 z?%)-n@KvXoH`Iv+(F?F_A7evhnVveA;AvNA+*W0rPQr=LZe_*7aHZ;g^4Q?;kEF=I z1kic?M6YsG=GZAvJy4D&1;IHl$qi9Jg@wudpVHnrxVNZ#^Nejfxv_0KH@0otww>IV zH@0o(#SshO#o^T(=Fd)L`@>ZtZ!>v=whn!Fo6YGIQvCC0q?c&O@$ 
z*ENYCS9S#PW>owbYRYs=2F+#0S^RpxotIh1>D~YtjdNeLs%6kbNHG@^WsW!~K*~7F z#K{d2S%M4nIzeugrPF}H#4hcXQMU9BB21hS8@b!$bCSxdE@*E~nB5fjokFYS4{PRF z!YwK9zy8~>unf6~4t)=7mHL(gcix%oqBl<^~ ztvK?aqdXODHvR0LeXdd?@0wP4X<~gODxQF`^M-}u?^kcaa{3)(KddgTGH$Jr63uDP z_oe9bM{mLZFcF!kBkqsCuCds(ekf+PQ9FAx@!sj#A+I@zOYZ}W>%O0zcfZ)m`%UQU z#qXW_t!CNqIdPu;nh~#-t$P%4Ctg_``eN`&sGl)HiC$7O#C`l(T)gNY8gNy+Y`dxx zX=SW^wE7!WC4GFDDz;alu;R&1eh>kx83Jh*b*$^a%# zLYZ$gvh;c?8Qz=vX*a%^QXnWv;iJ=h1_wTZWA%S2Q zC2LLHl4oIU89b~t6oL~c2GRJ`!^;yG1A)|Afj9M_%CzH>DR(w&iY(;mdv@BiE6%`f z_T0%-7E+nyf}lk^^$p0sko#3$C(n;L8%y+Bg3r3ImDGzV=Q-r!5SOx6flzIZ>||0_ zRkr&|Wc&lRG^hy&aGe#u4ms6}^D<8~a0b|`42$^T`;=R z?aErKMU>APq|4C>HcpBmc8$}43$tU&B%~c)dL71+knb)|x@+@&RjK8K#t8y}V?E(N zb>y+Wi&#$#hcMh>9-R=Xi8|q%Xl1^F{{>|^$1ocsbspC~pk_j3K%Ir+H(sl$4++iKx>}25(BR!x zS?(lgU)qv8d~2*khOH9*y>`Y;JM}zdAin3#Y(e2VeTU$5M0aIp6mJ9G`K@T+!}co9 zqe6HYH{Y(W-wDrfHTVceJo8oo@iPA%)H~fCz{qpv2?KA=*4kmnz4Ejwz<2*4Zaj`R zr*+czK+#te{vYA-3(lxUPs@6Ds(1=}q&})|$<0S?S%uMAI zK(Tn1)VlE>iuADv9ksU&HsEL3l=gaaf07&wxF(yy2Jm$<=8XEM-3ds2zDP&^w0VBM zj4U&LwocMh_OFBtu>%L-YKt`B!^=}!glN=@8}_@UFl1t2@n6Y^9d}bgln5gp*;WR5 zm@^sS`I}*(-?Azuj`|PI5;a8O!;ssFMW`2j4fSPM)*C`m*VIHb_rbL&z!FRU+8+tU zcdpunJs6T=Bx=ph&2WG&6yb~kTaNO?!T0Eg$x=;sbZl1wce9@ikNq)WFCjyt#bb<; z8P`>5S0%d+qG`_H5Il^Gu(A}w&z3K*BLn3~#JtQiBZ<@p!J!-Bg1Hx|^}`j7#vNlP zDJ$KdAS|_$VA)c{YlQ8AzJ(X0xMEn>!CO*awn5KOCBJsk0wMv_HSOB<%AiFe(xzZ9NA2(M1BT8A?9 zbsyNKuZ8xqXRvvIuNVh;JiB&-d2f`{Q@}@ru>%#K2n-C<#GZ@RR4aGeUL^P`N7%9r(I6 z>*=%sf!P8Ipm6v)81Pv5`xiE-8TF4222C6p2J|!Z56XIYxUjZW?U98A zSJF%Iwn33(!sFC9XdQqIT$F}E32q zj~Wlde|+{|`^f{zG_dT%RH!E2vfVCKyF38NfhpN)jpHLe0jFu-N z4j5ofwwkFj`1hzr;X|Q114{yD4z8e;xq%@Il4}Ui zg!qFUP*qb6bJeiM8XJzw*xAn>P#20w&wRwHvjjGU=AC(P`InE1gG$PhtA&DOmYpQj ze@5ka)!gy%E;^S)fnFVDSIa<0n~{F)zHO51+a7Z3diby19qfZ_=%5S@qvw#)@&QZo zS5bL%Pr-w8-UCUk>EIc_17lu}h(Ltn&qf`&6ibBPj-=1(Qj=@h5ML-UCEoN6IpMnQ z*Gpe~6Wq>ihb-+F7Z|-zd$|10a)#*D2YHkJ^p6BE+yRbeD6j$Ylns41{OX^8&Gxr7 zyl;QUmLWje-HyKSWUmei#BrW>(sC_6{sV)xM}qSL+v8El(km$=+qd#`U~1g$f}Kc2FE5JrR#MQv`v zYBC6JMyJj_K1$TeOFRtH zs~u5oO}n~r8KVrh&+o`vJbT5-kOxxL%jFVe`x9=1Az^i05b{Qd2a5$Wq>tUPhquNq z>bBv?GBwF<1A|Ii)BmJlTw^H$l8x*F3>JMa6WVm~_Rv=E@C*`rI;+R<^Zym`51KG8 zIp*Ky@rYbkG{YkFYYUQRYX#eu6?TG7Lt3GnVw%8|inter8e<+L#iU*JdQE@ui=V(@ zEc2M%QdX-9d4#_)`v23v*N%j64SMC*TDtN@X)Wzacs;lsI?RhFHZIv=TCx_XsBFS{nCbWF5M?|*X zf$5#m%hB2s3X09wpf>7!AE{ zso1ba+P-!uW(*$!j^@0SOVxZ^vW!yo%eOuuT24oP1w8_j``o2%JvkkJAuUQ){hy~{ zwjs^Ahp;(lMWRoUzPj^8lqO7A8?zXH%isRZJL^Ml!Lv&aaD7B%SI3C?Wk++Sld z!wG8+Ev!xcI0Y-vwcBRc8RS5^7W1_U^-AwHewvsjXs25hJp|Cv)t{xZ>5tkPgYO6_ zL_wo%7;_zBT~~(?IQ#B)Sg~C=fueQVCSAy(B4pXkj+rq^2GZTik}VBI$I0lR5}xd; zl$l+aM-7&(wKuMCQn%m^`C!VT+Y}Jx_Ebx?971yjbhc$xs)ZWWoPlfd*PL!Jhy5!X zkFUHWdgM`S1_f3NU-pB4N1XGKO~Mzuz^}MRiAr1XV6L$DL3$QiX`=3-sD!Rqpu55U zjmf4?_xz@Ut?&m!*>z7bIrc2u;@noxiPv^A+eyK8TQmrm0@1(-D~$YY04mgsmgYB| zGNi*-k*2bium$Jk7PKaKSpU?E^*&NXP|v5tIgFiiuVyPXEDV^;c&x#EOobpM_ux&b z!TI7R%n(fU>3ahSJC)=U~rS@XSA{{y36s{Qp?_|8`>b!d*lzH1=Gc@l7 zp(h2y>QMWuQfL7B-it-TI0u-Sbu5gxeZ6=-Zb(%e69`;I~%4yj!}{H$>!?uok*~-U^}|bE|V&3H6YKMHm2oc=5&m- z{F+O$E9wi?e^%`f?V%sc()!v0Cv*{AS?qM<|ros{_{4wzLbtMwZ z9PZSE6I*XX>BtWtqGLH-pNt<<=Ab9^rA)K2Kl29+AmAWQ1*9_#Fdn`P<)M1oB%dXX zIOI3wA&N^R!vs^G&cdf_>&Te1b+eS2(4iG(>CpSaM2t!07WMNRdwGYE9KlM`R#hjt zvTd%AVMTu}8cwj3^j;Kz_P)RLr~MlX!8hjf=0U%|#IC=&*^mFU;hQiTHk$1F1?{dk zk>;T{KwM!?sDE^PZ6NxPgJ}n3GL{FU`@6Zk<|so zkQB{QUSXn8iE5ULjk@8BA8zplBn?9HrQXErU}23QrjMYsRn2OEyfLu)J}|$B*aF*n zD8_kFB@CO?3?jw*z8d->Zdnb+o$qbA=kuL8>ks^*XeGCFeEfK`EO2is&|T+F>F+;+Yd%v03k_Jfj#!Rgq&*m|X_kMYkr{XU#>sZ!%k%;6Wp1Yf zGpQdyC?tXpT8|)}TmDp&=zD&@zl8o<{oH<}me%;N5Kz2Vh1+{9wifRcNV1vwx3~{F 
zC@HlFH>^kPnR&4xO)fhOmlH&bPKPg+gu~NvCi^UGQlSd!e`HD%_nu6vaTL-GG?&@v zl^x=MZzjbhd6!wZAx&mL*X=zfT9sL_?}IBLFfK_N){X-R@X?Pw}pgCrZ8vtse2K!GrC7tgj5! zuE=nOdjK?%MKKT8Nb@pjMaBjs1PV-re#8mD@B+Z>0L;)&c01N=0w%ulZU9sU?;6~F zP@jOuO<{V#;G+*V;DhzWKj^)*2k1Z)L}GRDmqHZPI&gAvCiT$xk7_O&Gk49~_NeZr zh|!Kt9NaUj!dlG_KL?n{BmiM}HF#jL3C|;65TpPb-y)wQAXxe$=KVUt@UDz3;u?#q zk2Qba6w?B2Xi8^R3oH`2V4J`KS%;EeebL1#$c#gFQf*XS0$%1w3-PAW9!05 z_&_6?sEU}c4`$sR)s}p|grzpX_;9%4x+vFGdWFP9k#k&XyYwT@LM*I!R}^`%ID4+bhI>YcFS)2<&y)cBBTi8IfGf7vj1s)eWgkY^Nzow_8kq zD^3n~NwQJd(J)&r1n_zBpz4fdo$cj&Rv>O z9i}XWPz)v+F`Ne-WRkp5xW~`C)X<3FO7&8x9-UPfT39?OiVb2!`4TY!R7XowA0Cof z2DT}hPC`$?+5C)gxmcLgBdK_b?)n_x>305PlMm}`NzNgQfs-&Rwc?@BYX!VP0qXH)-gi9IE^U#}*N0$SIJQd_TSrxBN zvM`1!JlU6j+a&&zlGx~HBF@;_EPK=NL>HVLxj!CX$02ts@THD|o`_AFh)wwTDOB+%)oxHJf)dF$}#|bBWP&10{O$oqH{n5X!J=X!H<`@_=rdI$WV5tv)<);Si7l%oeF?Qv#RnnuH z52BkAH@$oXRy4L4Pi!|9m)@jrw#VXlI{Tv^5Dv8amI(Uc`SLP^wQ{#H)iyI}Px&T& zV;pQ|1L9L=;scM}W?YwU`K6cRCMFEjq{n=G>)j_}%GloaGYGeg2??M5Sk3(mmM??} z^acSJJatM`X05S<6SVK_{s%n;v`a}|+wx!dvpM9CIaWlUb|_j`Neb*WzAK>;&^M?X z$KDh1Zd zcUkYxO2T?lU$u*X{~oH|^KO0ore9~A9~xP>K2MCw0XgK~pi@tj5Tp=b<^*oFQl{P; zcVAbwO-Zs?hVT^ED%9pknCIa180NfBVmyzETO<>0xUPJYvHep#DNnfSE$VhvJ8kWe zdzF@`*={X!<31vY=xMy;IV5<|s3iq|Ap{g3#}3hO$GXa3DZaYqE!ZIW*-pJ4?{529z`; zh~>u{+G4C%LnfsQ(=`DyNgy+@pW6t%w?<9)h=s{|;iQPBM@bnl#2rHa66=UZ=!(*N zK$e)2P0s$-nTA47kZseqAsd{SpQLQr{5JEyhpNlB0U#+ocYj00yjw)mo+9^4RGipI9O^^ z=&eelePm3o%D~cFUMdW2gehRPjQv0jvU`HSV-S}H*mE%$w;OsI7478Sf76TP{)wKi zc03!Z^-EB03dOR%EP%Xiu8-b$N*eoT8?H!!svp6iPOs6<7~kGz+|@=;D#qc4-^6mu zOPkH=mO8NX8{YXLs`<@|-2eKBRT1BhyMRWP#XZ5)7NbgjIzYuzdMCtwzELNiu)gr0 zyax`8)LvRrHJ6JH*CN8&&Ax9F=wHhSmfCD$6J?>uAq#GG@X`?!X;sP1P*FZvGTjTU zzzt#jPLU~{MA%sBq*r)n7G~6~uJRh)C*5B;S(_9C&!KqRMLZj0-RGSuk8_=ZK@vAw z@6V@KM5teqOHC&)2f>nZB%dieuF6l?I)5?%C8o7;wOn@A3;$*?8F$Fq`_)Lwr8LGj+02sfg*WA0rT-!am?TbR zz(?YQT3`^SzVsG<)iwcA1d6{Ha+MtBPOt&SW@cV?{MG>Nbh<15Qg=Y*n|EWCNZU06 zea)4LgM{}s%4#^z05#=+Iy)F^ZFOJmreK*weRVW18E+0P`Fo0mOJMR*golclX@KrG zpKb@+Q+^q$BRx08C|^msHXS&FJU8(|w5TD&z{Mh3N!G;F1%!gMHu)is?S7+^+D=tHui19*gUcYhw22vz7!J>Mu<(!Po*GdxJtJ^YIDE# z83_FkZnQ4fAOYWV_JFgy8Nla{_#T>p5OB>i^L_gR4*VSbpM=4)9l%lY4d8vOb*Hv* zbmtA-KNx?X4Di}%|108sjKTl?P{QA$eor&Y9?_SFyS{1Io&7@CJzaEW7&`AMZ`A+E z{^ta8ZSL;81w8J*d-*@k81lX^IimOp0lQ!EfRmkL<4kS0Hg$iyBE&t*DLDHXZU*_C zkL~0@Y5nny=TFS~op8QT+gExh8_R}4%(G{@My1 zHd&D?NsQdk|tukLg@*c{egz@V|{d?dS z0TLuRGaixQejl|PVE9^DsU3Drg&-Z3ne}7FE1|k zq~SgO4}`urv%P719$pBti+m5VJdWMU-6w{!K5Z@!fWpD#dGMBAP4z8_r;SiNr!BO5 z68>-EF{2*E$16n{oZKo-FubY4LD8wA>E33?U z@`f(>uOI8{sXn$oPK6RzBA{8poQ|EJ+ASC^IvuqzD21uVg`?KZ$;fkqz>Yb zoybMJSzb|7tF^BwNzuFCFA~Fa?9;moEnYju`wXVHu1u?$9DG!x&b#q=3Bc_7GAM^} zC9_lM29g)pu&A5a?zwsIL%zx8c>tun; zE#|iVKST6~1b+c)3>PCjs%lFq2Sjy@3&q9pT7Kx9nCj{+tH(>tOHa~SLrSdz-XBqE zI#)NxnNJ-bQ?Ed=UjL66eK28tFI%_k`=ijc(6#jlLrL0vth8mmgadbp=QtSra%cPO zyj+4-=S8XNf{rld@Otus#IHhOiSwQF;r3{)WMPz8tC^GGxY_;B@t;8~Gl7t-Bi0yf zDhGGurzWT9r)L{Qb=QzBk`8bXW?i#4Ybae#b_Rg095ds>&+r(|h%V^CyMo zthsW3T&tKPaq6yO+-^VqzcTb=@b1?)kGyqjAJ#XE+ut~j_liKi`bdSGqicgh;vob} zJRkR+GC%G5WA$bb)YI?HVyL?dJSM{uke|5BjO z7jm3>D!;&#;_ENiA5zr+UkLhP0eWBB@3!smZjC}9gT$V;nyJw zz9R+F_73rw(kr2-5X+85ale^#xV+t#A|WqP^n`RJQ`#oV;*2i0RrEVC+;sfs*S2Ce zy?LwRP+7{E7pHTp^HQf_Gsm${)j+`4TwnZRvY>~#qNq&vL&r~yYeMy3Kd(dA&elR_ zl(Vkd`tY*xv9Pf?hkvca14cRS^q)(l>)2m}Hl#&4=ZT;`%J8@Oy;DiX zyTF>+G=f}mFt5jwfjT890@fPMNAm?3xK%4z7S~9-Vs@2k;IPlgP9HFf6B!H}WduiU z?q&ZVzJkmdMP~=>;r>M%;S`P)bSIxdYT&d4yH$tZ3F^QO#YX@MUder?= zvMt#TC6^M&)4(y_ga;AieM1C#(y%jc>2f4pS!eZil4&jNJoZ6U11LLJ6wH0~H?G6? 
z*1*j(xnB*Uy89I7GxLq6aav2cI!O)+cUAY~#0YHrE5oN7a)N*1e4>KE8$;Sm|J2%X z+vsQ;4v7WGdF2>hz@5aB!;KAY0yPD-=u9|76m59^!LMViMh!(x9026J(*I-MC1`)7 z7H)E?VIr;E$3E4+{PoYtUQlV?@_)zV7jSSrdLYJOGZnUV5}%9cI*NJsnKmvsdPRC& z7$%(X!OZm$m!&`vg+hV%{U4D0ek3C-wKSrlL!>KiT|!+Pq^VAZ7780S826991$_5b z2AxvXnCSnVkKY$@$>d((5b%JCC4hIzgGbeMl5hdSjbHeU3LEy^5160vKDQxh-FPgszNYhu(O9A+xYVrSziGMi?kv2qB ze8=Qs1k6(pE`woQ0_))}$sEkIV~c!TkUSzH8g9CG7?;I()&JX0$iRp?4nH!1=9LT4 z9JIx8R|hFvc)s*Q-;~D0NPzVSp?VfL`du>wu}&dP9KQ~@E+DFS9t7R|XIqIxq~@zF zwMr4^Z9vWrJRWu#^pJ67CQsCOO^p~NoMM|X&c@kSh78ZvkK79rHo8el z@ZR?3Xgd)z6m$-uMSW-xc3m+7&rxq{4;X~;-+|39$zq*ww{3o(Mp`L?0I170N-0|nbQm#C#$D>|;z9zS2(3@ZO$I82MfRSgu(E{SoNY~l|H9LX|;C`QSf#R`q{5p$kX=k zNQi0#Wwk6An^c{GJ3TB5xp9lzNhKOI*QMF*M_?n$7KGw+^5 z&i2C@mTz1w(oV{gbTT}pY;jjJ{lf4Kwh#^d3&fyHiQj@}=p|~hJYA3x*IVsstm8*N zJT*L5>ppq$F6LeZ5G@(cmJ2$*Q*q_(0_IN%S2j*2&~b)Zyy8&hx3Xr~IGdAO(#FXO z`Lg+2(D%$DWc;?ek(NPVMMg4 z#ANlvVnNa!NqdM5%IZ$G>eMJS<+bAxG@X{B<5P{*9ki3QN<;< za6#$oTyEyaQ(Yh3K^t$d3L{kZOjJ6y8RWh7PrQRfz2g~-gDy$Kz;i8*j&2AOhk$OX-=&w;jH}m%|m{&%%K?OxYUf zcUg^p`##;YItRxEL+dSHO0D7L`H0h1p_DbGfo=R22mL0EF{XS>u(y7Qhfb5Xrpv9R zF=yY>V6pdRSrz+7>kn+l`^f%_9K!BPh03}!+X5!wFJ`bx|NCv`U0K&uoqMx{egU$u z@m1kF1w&ADT}zYV$2tGYZVQBI@;$?E*mz5wmK&6}Ut5x$oXO!V#2b-|4zhK2Y+vZs zQ&;9xz3oaFamAnPr`wjjIi!X8_N8B2a^grgiA|VobB3)8=z`)1WH|po&||7@(`~-`QB@8!O3)uy!KTj*B3O_5Pp(@Q6hr2YE^hBZ-HtCjrb+T%Hvb4kR z%E3)G1Xrd`Iw^87CUz>hU}Qpo;scI};3m@XyVA3v#H9X9H@DZlig42QEYgnQe?b9! zC@Wx_K>0#^%Xd%<^`bI(m=?M(sSb@Xx3})2KY&k12BC=HmZP{FaIKagk`_7wMi=vp7axCEzm2O9X7VjxETuf9HeBX zm5JK-FX7h|4SkR#6A_t`U`}aZZWsWjJ%%tQTh(7KpmImnK1vNRmL`{go}y1E6(TO_nAXOfg__jIvanzlbfh$Ldci9mxyk9Swi6adt}Hz zkJR9ox^`w;Y6!dsgZ5)e!4o}mjNm$hZcIKVkavpbvWZK_GfSI234xC=tc#~b;plkbZ8r2;h*D+1vUKoduJ@t9LKVHBtOT#DQadOb zHPDAU^2X#9n|Bj4z}2Y6F_M<8jYIe%JHv zrhG6lPMIhM3#HC8qB#<38{K40CL6SAwb2Wb?gSQ=UC{?OBb zEtE6Y<5CS+rXl4h=|~jdtv_4g$tofV2a-xTYVn)sE`H_xHZ`dcb9vd#5luPr2IHt$ zO9@Q>vYfr98Z_JDMnbnqIX@1N z|K;BdsHMM20^A%$6Z$#WojP@K>hcl;c9eZf}rYfZzgWs5ZMTt1ZfPEJe_{q%4c|s4XgVzADH7g6{c%Mw0GfRoHgW++bJkJ2KGfpZR> zwTH6`yrz%pPLT(z%5W;=|~Dq=rZ@%TLNJF zs+|DveG!V6b(_8u0&M;QK7g>;zWnjT2wh_VY~L%d`Un28fWg~d>L4!SH+`t@o2#p( z|1|2d{#U2&xB08SY6Bkj_l@-+z!$U|2S6xc$zr)AA^e{se})h{0I0D<6%GKpOSSI& z{cA@6qwRqAA?wfDhu-e!SN#+Jg;#+-fXyctH~V+X44~$RZmU_sy(H5L$G2nSkh zPEyQagqGvTqbOybRAM?$C@BZ-Gd`;xI0DE7_+!2TzPG=DrvSYdKlprY|C=KK%j`}j z;A!$KkpDj;D8$gMySfOwnWocup$vMwOKJ}(xxfb#=j40)sSmwiMh#8=2L2A$JHa{_ zi24&{%qI{l%EaBb3N|XxZ!zVj1Wj%zq}m0jtp5& zwWC+mE+FxFa4P|>(fW00s=`sx(bN3!n8i_hEQV0M{)r-fw4vqoaLyh}9XM?T(odf# zGl-*RAo?BSE+Jm<7&o$sxt*uy0qb85@SN)Op3wfJ5U2OKTlD^>(;^lv}nYe$V~akrCr+VU=PQ^~p(Xf*by z4JLO(D~!1nz=N7iwKAa*)+F#$9O}6LYg@Hh)_sVTLVPMWE4fyv=4&eIn-PEjP5-Zt zbGa54OA1-~AEvU7WUDo6W;QHPT6Z(>dH1AAtdX0HP? zy?d>%+8u^^g|`2wR5D@!2S6f#8R#Hq4RZV4>t}{kgn$x7AttOy)6g4I&_%ypUF*hA h#X=3.8 +Description-Content-Type: text/markdown +License-File: LICENSE +Requires-Dist: requests>=2.32.3 + +# EDG4LLM + +

+ + +``` + __ __ __ __ ___ __ __ __ __ +| | |_ | / / \ |\/| |_ | / \ |_ | \ / _ |__| | | |\/| +|/\| |__ |__ \__ \__/ | | |__ | \__/ |__ |__/ \__) | |__ |__ | | + +``` + + +
+ +
+ +[📘Documentation](https://github.com/Alannikos/FunGPT) | +[🛠️Quick Start](https://github.com/Alannikos/FunGPT) | +[🤔Reporting Issues](https://github.com/Alannikos/FunGPT/issues) + +
+ +
+ + +[![GitHub Issues](https://img.shields.io/github/issues/Alannikos/edg4llm?style=flat&logo=github&color=%23FF5252)](https://github.com/Alannikos/edg4llm/issues) +[![GitHub forks](https://img.shields.io/github/forks/Alannikos/edg4llm?style=flat&logo=github&color=%23FF9800)](https://github.com/Alannikos/edg4llm/forks) +![GitHub Repo stars](https://img.shields.io/github/stars/Alannikos/edg4llm?style=flat&logo=github&color=%23FFEB3B) +![GitHub License](https://img.shields.io/github/license/Alannikos/edg4llm?style=flat&logo=github&color=%234CAF50) +[![Discord](https://img.shields.io/discord/1327445853388144681?style=flat&logo=discord)](https://discord.com/channels/1327445853388144681/) +[![Bilibili](https://img.shields.io/badge/dynamic/json?url=https%3A%2F%2Fapi.bilibili.com%2Fx%2Frelation%2Fstat%3Fvmid%3D3494365446015137&query=%24.data.follower&style=flat&logo=bilibili&label=followers&color=%23FF69B4)](https://space.bilibili.com/3494365446015137) +[![PyPI - Version](https://img.shields.io/pypi/v/edg4llm?style=flat&logo=pypi&logoColor=blue&color=red)](https://pypi.org/project/edg4llm/) +[![PyPI - Downloads](https://img.shields.io/pypi/dm/edg4llm?color=blue&logo=pypi&logoColor=gold)](https://pypi.org/project/edg4llm/) +[![PyPI - Python Version](https://img.shields.io/pypi/pyversions/edg4llm?logo=python&logoColor=gold)](https://pypi.org/project/edg4llm/) +
+ + +**Easy Data Generation For Large Language Model (abbreviated as EDG4LLM)**, a unified tool to generate fine-tuning datasets for LLMs, including questions, answers, and dialogues. + + +## Latest News + 
+2025 + +- [2025/01/11] 👋👋 We are excited to announce [**the initial release of edg4llm v1.0.12**](https://pypi.org/project/edg4llm/1.0.12/), marking the completion of its core functionalities. + +
+ +## Table of Contents +- [Latest News](#latest-news) +- [Introduction](#introduction) +- [Features](#features) +- [Installation](#installation) +- [Quick Start](#quick-start) +- [Requirements](#requirements) +- [Future Development Plans](#future-development-plans) +- [Acknowledgments](#acknowledgments) +- [License](#license) +- [Contact Me](#contact-me) +- [Star History](#star-history) + +## Introduction +**edg4llm** is a Python library designed specifically for generating fine-tuning data using large language models. This tool aims to assist users in creating high-quality training datasets efficiently. At its current stage, it mainly supports text data generation. The generated data includes, but is not limited to: +- **Question data** +- **Answer data** +- **Dialogue data** + +With **edg4llm**, users can easily produce diverse datasets tailored to fine-tuning requirements, significantly enhancing the performance of large language models in specific tasks. +## Features +EDG4LLM is a unified tool designed to simplify and accelerate the creation of fine-tuning datasets for large language models. With a focus on usability, efficiency, and adaptability, it offers a range of features to meet diverse development needs while ensuring seamless integration and robust debugging support. + +1. **Simple to Use**: Provides a straightforward interface that allows users to get started without complex configurations. +2. **Lightweight**: Minimal dependencies and low resource consumption make it efficient and easy to use. +3. **Flexibility**: Supports a variety of data formats and generation options, allowing customization to meet specific needs. +4. **Compatibility**: Seamlessly integrates with mainstream large language models and is suitable for various development scenarios. +5. **Transparent Debugging**: Provides clear and detailed log outputs, making it easy to debug and trace issues effectively. + +## Installation +To install **edg4llm**, simply run the following command in your terminal: + + +```bash +pip install edg4llm +``` + +### Supported Python Versions +- **Supported Python Versions**: Python 3.8 or higher is required for compatibility with this library. Ensure your environment meets this version requirement. + +### Supported LLM Providers +The current version of edg4llm supports the following large language model providers: +- [**InternLM**](https://github.com/InternLM) + - Developer: Developed by the Shanghai Artificial Intelligence Laboratory. + - Advantages: InternLM is a series of open-source large language models that offer outstanding reasoning, long-text processing, and tool usage capabilities. + +- [**ChatGLM**](https://github.com/THUDM/) + - Developer: Jointly developed by Tsinghua University and Zhipu AI. + - Advantages: ChatGLM is an open-source, bilingual dialog language model based on the General Language Model (GLM) architecture. It has been trained on a large corpus of Chinese and English text, making it highly effective for generating natural and contextually relevant responses. +- [**DeepSeek**](https://github.com/deepseek-ai/) + - Developer: Developed by the DeepSeek team. + - Advantages: DeepSeek-V3 is a powerful and cost-effective open-source large language model. It offers top-tier performance, especially in tasks like language generation, question answering, and dialog systems. +- [**OpenAI ChatGPT**](https://chatgpt.com/) + - Developer: Developed by OpenAI.
+
+## Quick Start
+
+To get started with **edg4llm**, follow the steps below. This example demonstrates how to use the library to generate dialogue data based on a specific prompt.
+
+### Prerequisites
+
+1. Install the **edg4llm** package:
+```bash
+pip install edg4llm
+```
+
+2. Ensure you have Python version 3.8 or higher.
+
+3. Obtain the necessary API key and base URL for your chosen model provider (e.g., ChatGLM).
+
+### Code Example (Chinese Version)
+```python
+# chatglm_demo.py
+
+import edg4llm
+print(edg4llm.__version__)
+
+from edg4llm import EDG4LLM
+
+api_key = "xxx"
+base_url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
+
+edg = EDG4LLM(model_provider='chatglm', model_name="glm-4-flash", base_url=base_url, api_key=api_key)
+
+# 设置测试数据
+system_prompt = """你是一个精通中国古代诗词的古文学大师"""
+
+user_prompt = """
+    目标: 1. 请生成过年为场景的连续多轮对话记录
+          2. 提出的问题要多样化。
+          3. 要符合人类的说话习惯。
+          4. 严格遵循规则: 请以如下格式返回生成的数据, 只返回JSON格式,json模板:
+              [
+                  {{
+                      "input":"AAA","output":"BBB"
+                  }}
+              ]
+              其中input字段表示一个人的话语, output字段表示专家的话语
+"""
+num_samples = 1  # 只生成一个对话样本
+
+# 调用 generate 方法生成对话
+data_dialogue = edg.generate(
+    task_type="dialogue",
+    system_prompt=system_prompt,
+    user_prompt=user_prompt,
+    num_samples=num_samples
+)
+```
+
+### Code Example (English Version)
+```python
+# chatglm_demo.py
+
+import edg4llm
+print(edg4llm.__version__)
+
+from edg4llm import EDG4LLM
+
+api_key = "xxx"
+base_url = "https://open.bigmodel.cn/api/paas/v4/chat/completions"
+
+edg = EDG4LLM(model_provider='chatglm', model_name="glm-4-flash", base_url=base_url, api_key=api_key)
+
+# Set the test data
+system_prompt = """You are a master of ancient Chinese literature, specializing in classical poetry."""
+
+user_prompt = """
+    Goal: 1. Please generate a multi-turn dialogue set in the context of celebrating the Lunar New Year.
+          2. The questions should be diverse.
+          3. The dialogue should align with natural human conversational habits.
+          4. Strictly follow this rule: Please return the generated data in the following format, only in JSON format. JSON template:
+              [
+                  {{
+                      "input":"AAA","output":"BBB"
+                  }}
+              ]
+              Where the input field represents a person's dialogue, and the output field represents the expert's response.
+"""
+num_samples = 1  # Generate only one dialogue sample
+
+# Call the generate method to generate the dialogue
+data_dialogue = edg.generate(
+    task_type="dialogue",
+    system_prompt=system_prompt,
+    user_prompt=user_prompt,
+    num_samples=num_samples
+)
+```
+
+### Explanation
+
+1. Importing the Library: Import the edg4llm library and verify the installed version with `print(edg4llm.__version__)`.
+
+2. Initialization: Use `EDG4LLM` to initialize the client with the appropriate model provider, model name, base URL, and API key.
+
+3. Prompts:
+   - `system_prompt` defines the behavior or role of the assistant.
+   - `user_prompt` provides specific instructions for generating data.
+
+4. Data Generation: Call the `generate` method with the following parameters:
+   - `task_type`: Defines the type of task (e.g., dialogue, question-answering).
+   - `system_prompt` and `user_prompt`: Provide context and task-specific instructions.
+   - `num_samples`: Specifies how many samples to generate.
+
+5. Output: The generated data is returned as a JSON object in the specified format; see the sketch below for one way to save it to disk.
+
+## Requirements
+This project has **minimal dependencies**, requiring only the `requests` library. Make sure you have the following version installed:
+
+- `requests>=2.32.3`
+
+## Future Development Plans
+1. - [ ] Record an introduction video
+2. - [ ] Support Gemini 2
+3. - [ ] Support local large language models
+4. - [ ] Support other types of data, such as images
+
+## Acknowledgments
+| Project | Description |
+|---|---|
+| [FunGPT](https://github.com/Alannikos/FunGPT) | An open-source role-play project |
+| [InternLM](https://github.com/InternLM/InternLM) | A series of advanced open-source large language models |
+| [ChatGLM](https://github.com/THUDM/) | A bilingual dialogue language model based on the General Language Model (GLM) architecture, jointly developed by Tsinghua University and Zhipu AI. |
+| [DeepSeek](https://github.com/deepseek-ai/) | A powerful and cost-effective open-source large language model, excelling in tasks such as language generation, question answering, and dialogue systems. |
+| [ChatGPT](https://openai.com/chatgpt/) | A highly advanced language model developed by OpenAI, known for its robust text generation capabilities. |
+
+## License
+MIT License - see [LICENSE](LICENSE) for details.
+
+## Contact Me
+Thank you for using **EDG4LLM**! Your support and feedback are invaluable in making this project better.
+
+If you encounter any issues, have suggestions, or simply want to share your thoughts, feel free to:
+- Submit an issue: Visit the [Issues Page](https://github.com/Alannikos/edg4llm/issues) and describe the problem or suggestion.
+- Email me: You can also reach out directly at alannikos768@outlook.com. I'll do my best to respond promptly.
+
+Your contributions and feedback are greatly appreciated. Thank you for helping improve this tool!
+
+## Star History
+
+[![Star History Chart](https://api.star-history.com/svg?repos=Alannikos/edg4llm&type=Date)](https://star-history.com/#Alannikos/edg4llm&Date)
diff --git a/edg4llm.egg-info/SOURCES.txt b/edg4llm.egg-info/SOURCES.txt
new file mode 100644
index 0000000..0c7a2ce
--- /dev/null
+++ b/edg4llm.egg-info/SOURCES.txt
@@ -0,0 +1,35 @@
+LICENSE
+README.md
+setup.py
+edg4llm/__init__.py
+edg4llm.egg-info/PKG-INFO
+edg4llm.egg-info/SOURCES.txt
+edg4llm.egg-info/dependency_links.txt
+edg4llm.egg-info/not-zip-safe
+edg4llm.egg-info/requires.txt
+edg4llm.egg-info/top_level.txt
+edg4llm/core/__init__.py
+edg4llm/core/dataGenerators.py
+edg4llm/core/interface.py
+edg4llm/core/pipeline.py
+edg4llm/generators/__init__.py
+edg4llm/generators/text_generators/__init__.py
+edg4llm/generators/text_generators/answer_generator.py
+edg4llm/generators/text_generators/base_generator.py
+edg4llm/generators/text_generators/dialogue_generator.py
+edg4llm/generators/text_generators/question_generator.py
+edg4llm/models/__init__.py
+edg4llm/models/baseModel.py
+edg4llm/models/chatglm.py
+edg4llm/models/chatgpt.py
+edg4llm/models/deepseek.py
+edg4llm/models/internlm.py
+edg4llm/processor/__init__.py
+edg4llm/processor/postprocess.py
+edg4llm/processor/preprocess.py
+edg4llm/utils/__init__.py
+edg4llm/utils/config.py
+edg4llm/utils/data_utils.py
+edg4llm/utils/exceptions.py
+edg4llm/utils/logger.py
+edg4llm/utils/template.py
\ No newline at end of file
diff --git a/edg4llm.egg-info/dependency_links.txt b/edg4llm.egg-info/dependency_links.txt
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/edg4llm.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/edg4llm.egg-info/not-zip-safe b/edg4llm.egg-info/not-zip-safe
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/edg4llm.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/edg4llm.egg-info/requires.txt b/edg4llm.egg-info/requires.txt
new file mode 100644
index 0000000..d86a09d
--- /dev/null
+++ b/edg4llm.egg-info/requires.txt
@@ -0,0 +1 @@
+requests>=2.32.3
diff --git a/edg4llm.egg-info/top_level.txt b/edg4llm.egg-info/top_level.txt
new file mode 100644
index 0000000..7080f1d
--- /dev/null
+++ b/edg4llm.egg-info/top_level.txt
@@ -0,0 +1 @@
+edg4llm