diff --git a/.github/workflows/python-test.yml b/.github/workflows/python-test.yml index 9017f795a..0e51c6de1 100644 --- a/.github/workflows/python-test.yml +++ b/.github/workflows/python-test.yml @@ -14,7 +14,7 @@ jobs: python-version: ['3.9', '3.10', '3.11', '3.12'] steps: - - uses: actions/checkout@v3 # Updated to the latest version + - uses: actions/checkout@v4 # Updated to the latest version - name: Set up Python ${{ matrix.python-version }} uses: actions/setup-python@v4 # Updated to the latest version with: @@ -37,7 +37,7 @@ jobs: poetry run pytest - name: Upload pytest results as an artifact (optional) - uses: actions/upload-artifact@v3 # Updated to the latest version + uses: actions/upload-artifact@v4 # Updated to the latest version if: always() # Always run this step to ensure test results are saved even if previous steps fail with: name: pytest-results diff --git a/adalflow/CHANGELOG.md b/adalflow/CHANGELOG.md index f87773ab4..1b0bef631 100644 --- a/adalflow/CHANGELOG.md +++ b/adalflow/CHANGELOG.md @@ -1,10 +1,23 @@ -## [0.2.7] - To Be Released +## [0.2.7] - 2025-01-16 + ### Added - `Memory` is completed with `call` and `add_dialog_turn` methods. - Integrated `LanceDB` in the `Retriever` +- Multi-modal (image input and generation) in `OpenAIClient` along with tests. +- `ComponentList` to support a list of components registered in a component. Added `test_componentlist` to test the `ComponentList`. + ### Improved +- Better diagnosis report for `Trainer.diagnose`. - `BedrockAPIClient` added more details on setup, yet it is still in experimental stage. - `AzureAPIClient` added more details on setup, yet it is still in experimental stage. +- `Retriever` class: + - Support data id (field). +- `GradComponent`: Support pass-through gradient for the `forward` method. + +### Optimization +- Aggregated all backward engine prompts in `backward_engine_prompt`. +- Added `TGDData` for the optimizer to support reasoning when proposing new prompts. +- Added `sequential_order` in the `Trainer` to support the sequential training order. Reorganized the trainer code. ## [0.2.6] - 2024-11-25 ### Improved - Add default `max_tokens=512` to the `AnthropicAPIClient` to avoid the error when the user does not provide the `max_tokens` in the prompt.
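For orientation, the headline multimodal additions in this changelog can be driven roughly as in the sketch below. This is an illustrative sketch, not code from this patch: it assumes adalflow 0.2.7 is installed, OPENAI_API_KEY is set in the environment, and the image URL is a placeholder.

from adalflow.core.types import ModelType
from adalflow.components.model_client.openai_client import OpenAIClient

client = OpenAIClient()

# Vision input: images ride along in model_kwargs["images"] (path, URL, or a list).
kwargs = client.convert_inputs_to_api_kwargs(
    input="Describe this picture.",
    model_kwargs={"model": "gpt-4o", "images": "https://example.com/photo.jpg"},
    model_type=ModelType.LLM,
)
print(client.call(api_kwargs=kwargs, model_type=ModelType.LLM))

# Image generation: the new ModelType.IMAGE_GENERATION path.
kwargs = client.convert_inputs_to_api_kwargs(
    input="A watercolor lighthouse at dawn",
    model_kwargs={"model": "dall-e-3", "size": "1024x1024", "quality": "standard"},
    model_type=ModelType.IMAGE_GENERATION,
)
images = client.call(api_kwargs=kwargs, model_type=ModelType.IMAGE_GENERATION)  # URL(s) or b64 JSON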
diff --git a/adalflow/adalflow/__init__.py b/adalflow/adalflow/__init__.py index fa4cd930c..bc20655fb 100644 --- a/adalflow/adalflow/__init__.py +++ b/adalflow/adalflow/__init__.py @@ -1,7 +1,7 @@ -__version__ = "0.2.6" +__version__ = "0.2.7" from adalflow.core.component import Component, fun_to_component -from adalflow.core.container import Sequential +from adalflow.core.container import Sequential, ComponentList from adalflow.core.base_data_class import DataClass, DataClassFormatType, required_field from adalflow.optim.grad_component import GradComponent @@ -63,6 +63,10 @@ BedrockAPIClient, ) +# data pipeline +from adalflow.components.data_process.text_splitter import TextSplitter +from adalflow.components.data_process.data_components import ToEmbeddings + __all__ = [ "Component", "fun_to_component", @@ -72,7 +76,10 @@ "required_field", # Container "Sequential", + "ComponentList", + # Grad Component "GradComponent", + # Functional Component "ModelClient", "Generator", "Embedder", @@ -99,6 +106,9 @@ "JsonOutputParser", "ListOutputParser", "DataClassParser", + # Data Pipeline + "TextSplitter", + "ToEmbeddings", # Types "GeneratorOutput", "EmbedderOutput", diff --git a/adalflow/adalflow/components/model_client/openai_client.py b/adalflow/adalflow/components/model_client/openai_client.py index 809fd3e01..1e22d5ef4 100644 --- a/adalflow/adalflow/components/model_client/openai_client.py +++ b/adalflow/adalflow/components/model_client/openai_client.py @@ -1,6 +1,7 @@ """OpenAI ModelClient integration.""" import os +import base64 from typing import ( Dict, Sequence, @@ -35,6 +36,7 @@ from openai.types import ( Completion, CreateEmbeddingResponse, + Image, ) from openai.types.chat import ChatCompletionChunk, ChatCompletion @@ -99,7 +101,7 @@ def get_probabilities(completion: ChatCompletion) -> List[List[TokenLogProb]]: class OpenAIClient(ModelClient): __doc__ = r"""A component wrapper for the OpenAI API client. - Support both embedding and chat completion API. + Support both embedding and chat completion API, including multimodal capabilities. Users (1) simplify use ``Embedder`` and ``Generator`` components by passing OpenAIClient() as the model_client. (2) can use this as an example to create their own API client or extend this class(copying and modifing the code) in their own project. @@ -110,6 +112,17 @@ class OpenAIClient(ModelClient): Instead - use :ref:`OutputParser` for response parsing and formating. + For multimodal inputs, provide images in model_kwargs["images"] as a path, URL, or list of them. + The model must support vision capabilities (e.g., gpt-4o, gpt-4o-mini, o1, o1-mini). + + For image generation, use model_type=ModelType.IMAGE_GENERATION and provide: + - model: "dall-e-3" or "dall-e-2" + - prompt: Text description of the image to generate + - size: "1024x1024", "1024x1792", or "1792x1024" for DALL-E 3; "256x256", "512x512", or "1024x1024" for DALL-E 2 + - quality: "standard" or "hd" (DALL-E 3 only) + - n: Number of images to generate (1 for DALL-E 3, 1-10 for DALL-E 2) + - response_format: "url" or "b64_json" + Args: api_key (Optional[str], optional): OpenAI API key. Defaults to None. chat_completion_parser (Callable[[Completion], Any], optional): A function to parse the chat completion to a str. Defaults to None. 
@@ -118,6 +131,8 @@ class OpenAIClient(ModelClient): References: - Embeddings models: https://platform.openai.com/docs/guides/embeddings - Chat models: https://platform.openai.com/docs/guides/text-generation + - Vision models: https://platform.openai.com/docs/guides/vision + - Image models: https://platform.openai.com/docs/guides/images - OpenAI docs: https://platform.openai.com/docs/introduction """ @@ -200,7 +215,7 @@ def track_completion_usage( def parse_embedding_response( self, response: CreateEmbeddingResponse ) -> EmbedderOutput: - r"""Parse the embedding response to a structure LightRAG components can understand. + r"""Parse the embedding response to a structure AdalFlow components can understand. Should be called in ``Embedder``. """ @@ -218,7 +233,20 @@ def convert_inputs_to_api_kwargs( ) -> Dict: r""" Specify the API input type and output api_kwargs that will be used in _call and _acall methods. - Convert the Component's standard input, and system_input(chat model) and model_kwargs into API-specific format + Convert the Component's standard input, and system_input(chat model) and model_kwargs into API-specific format. + For multimodal inputs, images can be provided in model_kwargs["images"] as a string path, URL, or list of them. + The model specified in model_kwargs["model"] must support multimodal capabilities when using images. + + Args: + input: The input text or messages to process + model_kwargs: Additional parameters including: + - images: Optional image source(s) as path, URL, or list of them + - detail: Image detail level ('auto', 'low', or 'high'), defaults to 'auto' + - model: The model to use (must support multimodal inputs if images are provided) + model_type: The type of model (EMBEDDER, LLM, or IMAGE_GENERATION) + + Returns: + Dict: API-specific kwargs for the model call """ final_model_kwargs = model_kwargs.copy() @@ -232,6 +260,8 @@ convert_inputs_to_api_kwargs( elif model_type == ModelType.LLM: # convert input to messages messages: List[Dict[str, str]] = [] + images = final_model_kwargs.pop("images", None) + detail = final_model_kwargs.pop("detail", "auto") if self._input_type == "messages": system_start_tag = "" @@ -248,19 +278,74 @@ if match: system_prompt = match.group(1) input_str = match.group(2) - else: print("No match found.") if system_prompt and input_str: messages.append({"role": "system", "content": system_prompt}) - messages.append({"role": "user", "content": input_str}) + if images: + content = [{"type": "text", "text": input_str}] + if isinstance(images, (str, dict)): + images = [images] + for img in images: + content.append(self._prepare_image_content(img, detail)) + messages.append({"role": "user", "content": content}) + else: + messages.append({"role": "user", "content": input_str}) if len(messages) == 0: - messages.append({"role": "system", "content": input}) + if images: + content = [{"type": "text", "text": input}] + if isinstance(images, (str, dict)): + images = [images] + for img in images: + content.append(self._prepare_image_content(img, detail)) + messages.append({"role": "user", "content": content}) + else: + messages.append({"role": "system", "content": input}) final_model_kwargs["messages"] = messages + elif model_type == ModelType.IMAGE_GENERATION: + # For image generation, input is the prompt + final_model_kwargs["prompt"] = input + # Ensure model is specified + if "model" not in final_model_kwargs: + raise ValueError("model must be specified for image generation") + # Set defaults for DALL-E 3 if not specified + 
final_model_kwargs["size"] = final_model_kwargs.get("size", "1024x1024") + final_model_kwargs["quality"] = final_model_kwargs.get( + "quality", "standard" + ) + final_model_kwargs["n"] = final_model_kwargs.get("n", 1) + final_model_kwargs["response_format"] = final_model_kwargs.get( + "response_format", "url" + ) + + # Handle image edits and variations + image = final_model_kwargs.get("image") + if isinstance(image, str) and os.path.isfile(image): + final_model_kwargs["image"] = self._encode_image(image) + + mask = final_model_kwargs.get("mask") + if isinstance(mask, str) and os.path.isfile(mask): + final_model_kwargs["mask"] = self._encode_image(mask) else: raise ValueError(f"model_type {model_type} is not supported") return final_model_kwargs + def parse_image_generation_response(self, response: List[Image]) -> GeneratorOutput: + """Parse the image generation response into a GeneratorOutput.""" + try: + # Extract URLs or base64 data from the response + data = [img.url or img.b64_json for img in response] + # For single image responses, unwrap from list + if len(data) == 1: + data = data[0] + return GeneratorOutput( + data=data, + raw_response=str(response), + ) + except Exception as e: + log.error(f"Error parsing image generation response: {e}") + return GeneratorOutput(data=None, error=str(e), raw_response=str(response)) + @backoff.on_exception( backoff.expo, ( @@ -285,6 +370,19 @@ def call(self, api_kwargs: Dict = {}, model_type: ModelType = ModelType.UNDEFINE self.chat_completion_parser = handle_streaming_response return self.sync_client.chat.completions.create(**api_kwargs) return self.sync_client.chat.completions.create(**api_kwargs) + elif model_type == ModelType.IMAGE_GENERATION: + # Determine which image API to call based on the presence of image/mask + if "image" in api_kwargs: + if "mask" in api_kwargs: + # Image edit + response = self.sync_client.images.edit(**api_kwargs) + else: + # Image variation + response = self.sync_client.images.create_variation(**api_kwargs) + else: + # Image generation + response = self.sync_client.images.generate(**api_kwargs) + return response.data else: raise ValueError(f"model_type {model_type} is not supported") @@ -311,6 +409,21 @@ async def acall( return await self.async_client.embeddings.create(**api_kwargs) elif model_type == ModelType.LLM: return await self.async_client.chat.completions.create(**api_kwargs) + elif model_type == ModelType.IMAGE_GENERATION: + # Determine which image API to call based on the presence of image/mask + if "image" in api_kwargs: + if "mask" in api_kwargs: + # Image edit + response = await self.async_client.images.edit(**api_kwargs) + else: + # Image variation + response = await self.async_client.images.create_variation( + **api_kwargs + ) + else: + # Image generation + response = await self.async_client.images.generate(**api_kwargs) + return response.data else: raise ValueError(f"model_type {model_type} is not supported") @@ -332,22 +445,74 @@ def to_dict(self) -> Dict[str, Any]: output = super().to_dict(exclude=exclude) return output + def _encode_image(self, image_path: str) -> str: + """Encode image to base64 string. + + Args: + image_path: Path to image file. + + Returns: + Base64 encoded image string. + + Raises: + ValueError: If the file cannot be read or doesn't exist. 
+ """ + try: + with open(image_path, "rb") as image_file: + return base64.b64encode(image_file.read()).decode("utf-8") + except FileNotFoundError: + raise ValueError(f"Image file not found: {image_path}") + except PermissionError: + raise ValueError(f"Permission denied when reading image file: {image_path}") + except Exception as e: + raise ValueError(f"Error encoding image {image_path}: {str(e)}") + + def _prepare_image_content( + self, image_source: Union[str, Dict[str, Any]], detail: str = "auto" + ) -> Dict[str, Any]: + """Prepare image content for API request. + + Args: + image_source: Either a path to local image or a URL. + detail: Image detail level ('auto', 'low', or 'high'). + Returns: + Formatted image content for API request. + """ + if isinstance(image_source, str): + if image_source.startswith(("http://", "https://")): + return { + "type": "image_url", + "image_url": {"url": image_source, "detail": detail}, + } + else: + base64_image = self._encode_image(image_source) + return { + "type": "image_url", + "image_url": { + "url": f"data:image/jpeg;base64,{base64_image}", + "detail": detail, + }, + } + return image_source + + +# Example usage: # if __name__ == "__main__": # from adalflow.core import Generator # from adalflow.utils import setup_env, get_logger - +# # log = get_logger(level="DEBUG") - +# # setup_env() # prompt_kwargs = {"input_str": "What is the meaning of life?"} - +# # gen = Generator( # model_client=OpenAIClient(), # model_kwargs={"model": "gpt-3.5-turbo", "stream": True}, # ) # gen_response = gen(prompt_kwargs) # print(f"gen_response: {gen_response}") - +# # for genout in gen_response.data: # print(f"genout: {genout}") diff --git a/adalflow/adalflow/core/__init__.py b/adalflow/adalflow/core/__init__.py index 384725200..a4a67c6a6 100644 --- a/adalflow/adalflow/core/__init__.py +++ b/adalflow/adalflow/core/__init__.py @@ -1,7 +1,7 @@ from .base_data_class import DataClass, required_field, DataClassFormatType from .component import Component, FunComponent, fun_to_component -from .container import Sequential +from .container import Sequential, ComponentList from .db import LocalDB from .default_prompt_template import DEFAULT_ADALFLOW_SYSTEM_PROMPT from .embedder import Embedder, BatchEmbedder @@ -50,6 +50,7 @@ "LocalDB", "Component", "Sequential", + "ComponentList", "FunComponent", "fun_to_component", "DataClass", diff --git a/adalflow/adalflow/core/base_data_class.py b/adalflow/adalflow/core/base_data_class.py index daac546d4..1a379724a 100644 --- a/adalflow/adalflow/core/base_data_class.py +++ b/adalflow/adalflow/core/base_data_class.py @@ -356,8 +356,6 @@ class TrecDataList(DataClass): return dict(ordered_dict) - return ordered_dict - @classmethod def from_dict(cls, data: Dict[str, Any]) -> "DataClass": """Create a dataclass instance from a dictionary. 
diff --git a/adalflow/adalflow/core/component.py b/adalflow/adalflow/core/component.py index 28bb794e5..d0dd66315 100644 --- a/adalflow/adalflow/core/component.py +++ b/adalflow/adalflow/core/component.py @@ -167,6 +167,7 @@ def use_teacher(self, mode: bool = True): component.use_teacher(mode) return self + # TODO: reassess trace; it should maybe be turned on all the time def trace(self, mode: bool = True): r"""Sets the component in tracing mode.This signal will be used in forward and backward to accumulate input and output.""" if not isinstance(mode, bool): diff --git a/adalflow/adalflow/core/container.py b/adalflow/adalflow/core/container.py index bb2a1a54e..a941adb10 100644 --- a/adalflow/adalflow/core/container.py +++ b/adalflow/adalflow/core/container.py @@ -1,14 +1,63 @@ -"""Container component for composing multiple components, such as Sequential.""" - -from collections import OrderedDict +""" +Container component for composing multiple components, such as Sequential +and ComponentList. + +This design draws inspiration from PyTorch’s modular +container patterns, including `nn.Sequential` and `nn.ModuleList`. The +`Container` component allows for grouping several components into one, enabling +flexible and reusable model architectures. + +Design Motivation: +------------------- +This implementation follows the same principles as PyTorch’s component-based +design, encouraging modularity, reusability, and extensibility. The `Container` +component provides an easy way to manage multiple layers or other components, +while ensuring that their parameters are properly registered and updated during +training. + +Credits: +--------- +The design of this component takes inspiration from the PyTorch project +(https://pytorch.org). PyTorch is an open-source deep learning framework, +licensed under a BSD-style license. Although this code is not part of the +official PyTorch library, it mirrors the same design principles. + +For more details on PyTorch’s licensing, refer to: +https://github.com/pytorch/pytorch/blob/main/LICENSE + +Usage Example: +-------------- + class MyModule(nn.Module): + def __init__(self): + super().__init__() + + self.model = nn.Sequential( + nn.Conv2d(1,20,5), + nn.ReLU(), + nn.Conv2d(20,64,5), + nn.ReLU() + ) + self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)]) + + def forward(self, x): + # ModuleList can act as an iterable, or be indexed using ints + for i, l in enumerate(self.linears): + x = self.linears[i // 2](x) + l(x) + return x + +""" + +from collections import OrderedDict, abc as container_abcs import operator -from itertools import islice -from typing import TypeVar, Dict, Union, Iterable, Iterator, Any, overload +from itertools import islice, chain +from typing import TypeVar, Dict, Union, Iterable, Iterator, Any, overload, Optional from adalflow.core.component import Component T = TypeVar("T", bound=Component) +__all__ = ["Sequential", "ComponentList"] class Sequential(Component): __doc__ = r"""A sequential container. @@ -311,3 +360,177 @@ def extend(self, components: Iterable[Component]) -> "Sequential": for component in components: self.append(component) return self + + +def _addindent(s_: str, numSpaces: int): + s = s_.split("\n") + # don't do anything for single-line stuff + if len(s) == 1: + return s_ + first = s.pop(0) + s = [(numSpaces * " ") + line for line in s] + s = "\n".join(s) + s = first + "\n" + s + return s + + +class ComponentList(Component): + __doc__ = r"""Holds subcomponents in a list. 
+ + :class:`adalflow.core.ComponentList` can be indexed like a regular Python list, but + the components it holds are properly registered, and will be visible to all + :class:`adalflow.core.Component` methods. + + Args: + components (iterable, optional): an iterable of components to add + + Examples: + + .. code-block:: python + + # Example of how to use ComponentList + class MyComponents(Component): + def __init__(self): + super().__init__() + self.llms = ComponentList([adal.Generator() for i in range(10)]) + + def forward(self, x): + for llm in self.llms: + x = llm(x) + return x + """ + _components: Dict[str, Component] = OrderedDict() + + def __init__(self, components: Optional[Iterable[Component]] = None) -> None: + super().__init__() + if components is not None: + self += components + + def _get_abs_string_index(self, idx): + """Get the absolute index as a string.""" + idx = operator.index(idx) + if not (-len(self) <= idx < len(self)): + raise IndexError(f"index {idx} is out of range") + if idx < 0: + idx += len(self) + return str(idx) + + def __getitem__(self, idx: Union[int, slice]) -> Union[Component, "ComponentList"]: + """Retrieve a component or a slice of components.""" + if isinstance(idx, slice): + return self.__class__(list(self._components.values())[idx]) + else: + return self._components[self._get_abs_string_index(idx)] + + def __setitem__(self, idx: int, component: Component) -> None: + """Set a component at the given index.""" + idx = self._get_abs_string_index(idx) + return setattr(self, str(idx), component) + + def __delitem__(self, idx: Union[int, slice]) -> None: + """Delete a component or a slice of components.""" + if isinstance(idx, slice): + for k in range(len(self._components))[idx]: + delattr(self, str(k)) + else: + delattr(self, self._get_abs_string_index(idx)) + # To preserve numbering, self._components is being reconstructed with components after deletion + str_indices = [str(i) for i in range(len(self._components))] + self._components = OrderedDict( + list(zip(str_indices, self._components.values())) + ) + + def __len__(self) -> int: + """Return the number of components.""" + return len(self._components) + + def __iter__(self) -> Iterator[Component]: + """Iterate over the components.""" + return iter(self._components.values()) + + def __iadd__(self, components: Iterable[Component]) -> "ComponentList": + """Add multiple components using the `+=` operator.""" + + return self.extend(components) + + def __add__(self, other: Iterable[Component]) -> "ComponentList": + """Concatenate two ComponentLists.""" + + combined = ComponentList() + for i, component in enumerate(chain(self, other)): + combined.add_component(str(i), component) + return combined + + def __repr__(self): + """Return a custom repr for ComponentList that compresses repeated component representations.""" + list_of_reprs = [repr(item) for item in self] + if len(list_of_reprs) == 0: + return self._get_name() + "()" + + start_end_indices = [[0, 0]] + repeated_blocks = [list_of_reprs[0]] + for i, r in enumerate(list_of_reprs[1:], 1): + if r == repeated_blocks[-1]: + start_end_indices[-1][1] += 1 + continue + + start_end_indices.append([i, i]) + repeated_blocks.append(r) + + lines = [] + main_str = self._get_name() + "(" + for (start_id, end_id), b in zip(start_end_indices, repeated_blocks): + local_repr = f"({start_id}): {b}" # default repr + + if start_id != end_id: + n = end_id - start_id + 1 + local_repr = f"({start_id}-{end_id}): {n} x {b}" + + local_repr = _addindent(local_repr, 2) + 
lines.append(local_repr) + + main_str += "\n " + "\n ".join(lines) + "\n" + main_str += ")" + return main_str + + def __dir__(self): + keys = super().__dir__() + keys = [key for key in keys if not key.isdigit()] + return keys + + def insert(self, index: int, component: Component) -> None: + """Insert a component at the specified index.""" + for i in range(len(self._components), index, -1): + self._components[str(i)] = self._components[str(i - 1)] + self._components[str(index)] = component + + def pop(self, index: Union[int, slice]) -> Component: + """Remove and return a component at the given index.""" + component = self[index] + del self[index] + return component + + def append(self, component: Component) -> "ComponentList": + """Append a component to the list.""" + # self._components[str(len(self))] = component + self.add_component(str(len(self)), component) + return self + + def extend(self, components: Iterable[Component]) -> "ComponentList": + """Extend the list by appending multiple components.""" + # for component in components: + # self.append(component) + # return self + + if not isinstance(components, container_abcs.Iterable): + raise TypeError( + "ComponentList.extend should be called with an " "iterable, but got " + type(components).__name__ + ) + offset = len(self) + for i, component in enumerate(components): + self.add_component(str(offset + i), component) + return self + + +# TODO: need to do the same to ParameterList and ParameterDict, ModuleDict diff --git a/adalflow/adalflow/core/generator.py b/adalflow/adalflow/core/generator.py index dd0ff5f69..baedd8fb7 100644 --- a/adalflow/adalflow/core/generator.py +++ b/adalflow/adalflow/core/generator.py @@ -4,6 +4,7 @@ import json import re +import os from pathlib import Path from typing import Any, Dict, Optional, Union, Callable, Tuple, List @@ -36,7 +37,7 @@ FEEDBACK_ENGINE_TEMPLATE, LLM_CONVERSATION_TEMPLATE, VARIABLE_AND_PEERS_INFO, - CONVERSATION_START_INSTRUCTION_BASE, + # CONVERSATION_START_INSTRUCTION_BASE, CONVERSATION_START_INSTRUCTION_CHAIN, OBJECTIVE_INSTRUCTION_BASE, OBJECTIVE_INSTRUCTION_CHAIN, @@ -47,6 +48,8 @@ log = logging.getLogger(__name__) +DEBUG_MODE = os.environ.get("DEBUG_MODE", False) + PromptArgType = Dict[str, Union[str, Parameter]] @@ -64,7 +67,7 @@ class Generator(GradComponent, CachedEngine, CallbackManager): Args: model_client (ModelClient): The model client to use for the generator. model_kwargs (Dict[str, Any], optional): The model kwargs to pass to the model client. Defaults to {}. Please refer to :ref:`ModelClient` for the details on how to set the model_kwargs for your specific model if it is from our library. - template (Optional[str], optional): The template for the prompt. Defaults to :ref:`DEFAULT_LIGHTRAG_SYSTEM_PROMPT`. + template (Optional[str], optional): The template for the prompt. Defaults to :ref:`DEFAULT_ADALFLOW_SYSTEM_PROMPT`. prompt_kwargs (Optional[Dict], optional): The preset prompt kwargs to fill in the variables in the prompt. Defaults to None. output_processors (Optional[Component], optional): The output processors after model call. It can be a single component or a chained component via ``Sequential``. Defaults to None. trainable_params (Optional[List[str]], optional): The list of trainable parameters. Defaults to []. 
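To ground the `ComponentList` API added above, here is a minimal usage sketch. `AddK` is a toy component invented for illustration, and the sketch assumes `Component.__call__` dispatches to `call`, as elsewhere in the library.

import adalflow as adal

class AddK(adal.Component):
    def __init__(self, k: int):
        super().__init__()
        self.k = k

    def call(self, x: int) -> int:
        return x + self.k

class Pipeline(adal.Component):
    def __init__(self):
        super().__init__()
        # Registered like nn.ModuleList: visible to named_components(), state_dict(), etc.
        self.steps = adal.ComponentList([AddK(i) for i in range(3)])

    def call(self, x: int) -> int:
        for step in self.steps:  # iterable and indexable like a regular list
            x = step(x)
        return x

print(Pipeline()(5))  # 5 + 0 + 1 + 2 = 8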
@@ -78,7 +81,9 @@ class Generator(GradComponent, CachedEngine, CallbackManager): model_client: ModelClient # for better type checking _use_cache: bool = False - _kwargs: Dict[str, Any] = {} + _kwargs: Dict[str, Any] = ( + {} + ) # to create teacher generator from student TODO: might reassess this def __init__( self, @@ -96,7 +101,7 @@ def __init__( cache_path: Optional[str] = None, use_cache: bool = False, ) -> None: - r"""The default prompt is set to the DEFAULT_LIGHTRAG_SYSTEM_PROMPT. It has the following variables: + r"""The default prompt is set to the DEFAULT_ADALFLOW_SYSTEM_PROMPT. It has the following variables: - task_desc_str - tools_str - example_str @@ -145,8 +150,8 @@ def __init__( # to support better testing on the parts beside of the model call self.mock_output: bool = False self.mock_output_data: str = "mock data" - self.data_map_func: Callable = None - self.set_data_map_func() + # self.data_map_func: Callable = None + # self.set_data_map_func() self._use_cache = use_cache self._kwargs = { @@ -160,6 +165,9 @@ "use_cache": use_cache, } self._teacher: Optional["Generator"] = None + self._trace_api_kwargs: Dict[str, Any] = ( + {} + ) # used by dynamic computation graph and backpropagation def set_cache_path(self, cache_path: str, model_client: object, model: str): """Set the cache path for the generator.""" @@ -373,7 +381,7 @@ def create_demo_data_instance( from adalflow.core.base_data_class import DynamicDataClassFactory # map the input fields - demo_data = {"id": id} + demo_data = {"id": id, "score": None} # add score to trace the prediction score demo_data_class_output_mapping, output_fields = self._get_default_mapping( output ) @@ -409,17 +417,17 @@ def set_teacher_generator(self, teacher: "Generator" = None): print(f"Teacher generator set: {self._teacher}, teacher {teacher}") log.debug(f"Teacher generator set: {self._teacher}") - def set_data_map_func(self, map_func: Callable = None): - def default_map_func(data: "GeneratorOutputType") -> str: - return ( - data.data - if data.data - else self.failure_message_to_backward_engine(data) - ) + # def set_data_map_func(self, map_func: Callable = None): + # def default_map_func(data: "GeneratorOutputType") -> str: + # return ( + # data.data + # if data.data + # else self.failure_message_to_backward_engine(data) + # ) - self.data_map_func = map_func or default_map_func + # self.data_map_func = map_func or default_map_func - log.debug(f"Data map function set: {self.data_map_func}") + # log.debug(f"Data map function set: {self.data_map_func}") # TODO: limit to only one demo parameter. @staticmethod @@ -431,14 +439,41 @@ def find_demo_parameter(prompt_kwargs: Dict) -> Optional[Parameter]: return p return None - # NOTE: when training is true, forward will be called in __call__ instead of call def forward( self, - prompt_kwargs: Optional[Dict] = {}, # the input need to be passed to the prompt + prompt_kwargs: Optional[ + Dict[str, Union[str, Parameter]] + ] = {}, # the input needs to be passed to the prompt model_kwargs: Optional[Dict] = {}, id: Optional[str] = None, ) -> "Parameter": - # 1. call the model + r"""Customized forward pass on top of the GradComponent forward method.""" + # 1. convert prompt_kwargs to parameter if it is not + for k, v in prompt_kwargs.items(): + if not isinstance(v, Parameter): + prompt_kwargs[k] = Parameter( + data=v, + name=f"{self.name}_{k}", + requires_opt=True, + param_type=ParameterType.INPUT, + data_id=id, + ) + + # 2. 
call the model + unwrapped_prompt_kwargs: Dict[str, Any] = {} + for k, v in prompt_kwargs.items(): + if isinstance(v, Parameter): + if v.param_type == ParameterType.INPUT: + v.data_id = id + unwrapped_prompt_kwargs[k] = v.map_to_successor(self) + else: + unwrapped_prompt_kwargs[k] = v + if DEBUG_MODE: + print( + f"unwrapped_prompt_kwargs: {unwrapped_prompt_kwargs}, model_kwargs: {model_kwargs}" + ) + print(f"prompt template: {self.template}") + output: GeneratorOutputType = None input_args = {} if self.mock_output: @@ -446,35 +481,36 @@ def forward( else: if self.teacher_mode and not isinstance(self, BackwardEngine): if not self._teacher: - print( - f"prompt_kwargs: {prompt_kwargs}, model_kwargs: {model_kwargs}" - ) - print(f"names: {self.name}") + if DEBUG_MODE: + print( + f"unwrapped_prompt_kwargs: {unwrapped_prompt_kwargs}, model_kwargs: {model_kwargs}" + ) + print(f"names: {self.name}") raise ValueError("Teacher generator is not set.") log.info(f"Using teacher: {self._teacher}") input_args = { "prompt_kwargs": compose_model_kwargs( - self._teacher.prompt_kwargs, prompt_kwargs + self._teacher.prompt_kwargs, unwrapped_prompt_kwargs ), "model_kwargs": compose_model_kwargs( self._teacher.model_kwargs, model_kwargs ), } - output = self._teacher.call(prompt_kwargs, model_kwargs) + output = self._teacher.call(**input_args, id=id) else: input_args = { "prompt_kwargs": compose_model_kwargs( - self.prompt_kwargs, prompt_kwargs + self.prompt_kwargs, unwrapped_prompt_kwargs ), "model_kwargs": compose_model_kwargs( self.model_kwargs, model_kwargs ), } - output = self.call(prompt_kwargs, model_kwargs) + output = self.call(**input_args, id=id) # 2. Generate a Parameter object from the output combined_prompt_kwargs = compose_model_kwargs(self.prompt_kwargs, prompt_kwargs) - if self.data_map_func is None: - self.set_data_map_func() + # if self.data_map_func is None: + # self.set_data_map_func() predecessors = [ p for p in combined_prompt_kwargs.values() if isinstance(p, Parameter) @@ -494,6 +530,8 @@ def forward( ) response.set_predecessors(predecessors) response.trace_forward_pass(input_args=input_args, full_response=output) + # *** special to the generator *** + response.trace_api_kwargs(api_kwargs=self._trace_api_kwargs) # attach the demo to the demo parameter # if self.tracing: demo_param = self.find_demo_parameter(combined_prompt_kwargs) @@ -509,12 +547,14 @@ def forward( output, id=id, ) - demo_param.add_to_trace(demo, is_teacher=self.teacher_mode) + demo_param.add_dataclass_to_trace(demo, is_teacher=self.teacher_mode) else: log.debug( "No demo parameter found in the prompt_kwargs. You can not trace the demo data." 
) + # **** end of the special to the generator **** + if not self.backward_engine: # self.set_backward_engine() log.debug(f"Backward engine: {self.backward_engine}") @@ -547,26 +587,26 @@ def backward( id: Optional[str] = None, # the id of the input ) -> Parameter: - log.info(f"Generator: Backward: {response}") + log.info(f"Generator: Backward: {response.name}") children_params = response.predecessors - is_chain = True + is_intermediate_node = True if response.get_gradient_and_context_text().strip() == "": log.info(f"Generator: Backward: No gradient found for {response}.") # backward score to the demo parameter for pred in children_params: - if pred.requires_opt: - pred.set_score(response._score) - log.debug( - f"backpropagate the score {response._score} to {pred.name}, is_teacher: {self.teacher_mode}" + # if pred.requires_opt: + pred.set_score(response._score) + log.debug( + f"backpropagate the score {response._score} to {pred.name}, is_teacher: {self.teacher_mode}" + ) + if pred.param_type == ParameterType.DEMOS: + # Accumulate the score to the demo + pred.add_score_to_trace( + trace_id=id, score=response._score, is_teacher=self.teacher_mode ) - if pred.param_type == ParameterType.DEMOS: - # Accumulate the score to the demo - pred.add_score_to_trace( - trace_id=id, score=response._score, is_teacher=self.teacher_mode - ) - log.debug(f"Pred: {pred.name}, traces: {pred._traces}") + log.debug(f"Pred: {pred.name}, traces: {pred._traces}") # 1.backward for text-gradients if backward_engine: @@ -587,7 +627,7 @@ def backward( template=template, backward_engine=backward_engine, prompt_str=prompt_str, - is_chain=is_chain, + is_intermediate_node=is_intermediate_node, ) else: log.debug("Backward engine is not set for the generator. No text gradient.") @@ -600,14 +640,17 @@ def _backward_through_one_predecessor( template: str, backward_engine: "BackwardEngine", prompt_str: str, - is_chain: bool = False, + is_intermediate_node: bool = False, ): + """Creating gradient/textual feedback for prompt type parameters.""" if not pred.requires_opt: log.debug( f"Generator: Skipping {pred} as it does not require optimization." 
) return - log.debug(f"Generator: Backward through {pred}, is_chain: {is_chain}") + log.debug( + f"Generator: Backward through {pred}, is_intermediate_node: {is_intermediate_node}" + ) if pred.check_if_already_computed_gradient_respect_to(response.id): log.debug( @@ -626,7 +669,8 @@ def _backward_through_one_predecessor( } conversation_prompt_kwargs = { - "variable_name": pred.name, + # "variable_name": pred.name, + # "variable_desc": pred.role_desc, "input_value": input_prompt_kwargs, "llm_output": response.data, } @@ -643,9 +687,9 @@ def _backward_through_one_predecessor( template=VARIABLE_AND_PEERS_INFO, )() - conv_ins_template = CONVERSATION_START_INSTRUCTION_BASE + conv_ins_template = None # CONVERSATION_START_INSTRUCTION_BASE obj_ins_template = OBJECTIVE_INSTRUCTION_BASE - if is_chain: + if is_intermediate_node: # TODO: this will always be true conv_ins_template = CONVERSATION_START_INSTRUCTION_CHAIN obj_ins_template = OBJECTIVE_INSTRUCTION_CHAIN @@ -661,7 +705,9 @@ def _backward_through_one_predecessor( template=obj_ins_template, prompt_kwargs={ "response_desc": response.role_desc, - "response_gradient": response.get_gradient_and_context_text(), + "response_gradient": response.get_gradient_and_context_text( + skip_correct_sample=True + ), "instruction_to_backward_engine": pred.instruction_to_backward_engine, }, )() @@ -673,11 +719,16 @@ def _backward_through_one_predecessor( gradient_output: GeneratorOutput = None if response._score is not None and float(response._score) > 0.9: log.debug(f"EvalFnToTextLoss: Skipping {pred} as the score is high enough.") - manual_response = f"You get a high score: {response._score}." + # TODO: plus score descriptions + manual_response = f"You get score: {response._score}." gradient_output = GeneratorOutput( data=manual_response, raw_response=manual_response ) else: + # manual_response = f"You get score: {response._score}." + # gradient_output = GeneratorOutput( + # data=manual_response, raw_response=manual_response + # ) gradient_output: GeneratorOutput = backward_engine( prompt_kwargs=backward_engine_prompt_kwargs @@ -711,7 +762,7 @@ def _backward_through_one_predecessor( pred.gradients_context[var_gradient] = GradientContext( context=conversation_str, response_desc=response.role_desc, - variable_desc=pred.role_desc, + variable_desc=pred.role_desc, # parameter_desc ) def _run_callbacks( @@ -763,6 +814,7 @@ def call( log.debug(f"model_kwargs: {model_kwargs}") api_kwargs = self._pre_call(prompt_kwargs, model_kwargs) + log.debug(f"api_kwargs: {api_kwargs}") output: GeneratorOutputType = None # call the model client @@ -796,6 +848,7 @@ def call( ) log.info(f"output: {output}") + self._trace_api_kwargs = api_kwargs # tracing return output # TODO: training is not supported in async call yet @@ -841,6 +894,7 @@ async def acall( prompt_kwargs=prompt_kwargs, model_kwargs=model_kwargs, ) + self._trace_api_kwargs = api_kwargs # tracing return output def __call__(self, *args, **kwargs) -> Union[GeneratorOutputType, Any]: @@ -880,6 +934,10 @@ def failure_message_to_backward_engine( return response_value +from adalflow.tracing.decorators import trace_generator_states + + +@trace_generator_states() class BackwardEngine(Generator): # it is a generator with defaule template __doc__ = """The backward engine is a Generator with a default template for the backward pass. 
@@ -890,10 +948,18 @@ def __init__(self, **kwargs): if kwargs is None: kwargs = {} kwargs["template"] = FEEDBACK_ENGINE_TEMPLATE + super().__init__(**kwargs) self.name = "BackwardEngine" self.teacher_mode = False + def call(self, **kwargs) -> GeneratorOutputType: + r"""Catch the rate limit error and raise it.""" + output = super().call(**kwargs) + if output and output.error is not None and "429" in output.error: + raise ValueError(f"Error in the backward engine: {output.error}") + return output + @staticmethod def failure_message_to_optimizer( gradient_response: GeneratorOutput, diff --git a/adalflow/adalflow/core/prompt_builder.py b/adalflow/adalflow/core/prompt_builder.py index eca455578..0d998b63e 100644 --- a/adalflow/adalflow/core/prompt_builder.py +++ b/adalflow/adalflow/core/prompt_builder.py @@ -1,4 +1,4 @@ -"""Class prompt builder for LightRAG system prompt.""" +"""Class prompt builder for AdalFlow system prompt.""" from typing import Dict, Any, Optional, List, TypeVar import logging @@ -20,10 +20,10 @@ class Prompt(Component): __doc__ = r"""Renders a text string(prompt) from a Jinja2 template string. - In default, we use the :ref:`DEFAULT_LIGHTRAG_SYSTEM_PROMPT` as the template. + By default, we use the :ref:`DEFAULT_ADALFLOW_SYSTEM_PROMPT` as the template. Args: - template (str, optional): The Jinja2 template string. Defaults to DEFAULT_LIGHTRAG_SYSTEM_PROMPT. + template (str, optional): The Jinja2 template string. Defaults to DEFAULT_ADALFLOW_SYSTEM_PROMPT. preset_prompt_kwargs (Optional[Dict], optional): The preset prompt kwargs to fill in the variables in the prompt. Defaults to {}. Examples: diff --git a/adalflow/adalflow/core/retriever.py b/adalflow/adalflow/core/retriever.py index bcde901fc..fb65a298c 100644 --- a/adalflow/adalflow/core/retriever.py +++ b/adalflow/adalflow/core/retriever.py @@ -83,6 +83,7 @@ def call( self, input: RetrieverQueriesType, top_k: Optional[int] = None, + id: str = None, # for tracing, diagnosing, and training **kwargs, ) -> RetrieverOutputType: raise NotImplementedError("retrieve is not implemented") @@ -91,6 +92,7 @@ async def acall( self, input: RetrieverQueriesType, top_k: Optional[int] = None, + id: str = None, # for tracing, diagnosing, and training **kwargs, ) -> RetrieverOutputType: raise NotImplementedError("Async retrieve is not implemented") @@ -102,6 +104,7 @@ def forward( self, input: RetrieverQueriesType, top_k: Optional[ int ] = None, # TODO: top_k can be trained in the future if its formulated as a parameter + id: str = None, # for tracing, diagnosing, and training **kwargs, ) -> Parameter: r"""Customized forward on top of the GradComponent forward method. @@ -123,6 +126,8 @@ requires_opt=True, param_type=ParameterType.HYPERPARAM, ) + if input is None: + raise ValueError("Input cannot be empty") response = super().forward(input, top_k=top_k, **kwargs) response.param_type = ( ParameterType.RETRIEVER_OUTPUT ) @@ -135,6 +140,24 @@ def backward( self, response: Parameter, id: Optional[str] = None, backward_engine: Optional["Generator"] = None, ): - r"""Backward the response to pass the score to predecessors""" - log.info(f"Retriever backward: {response}") - pass + r"""Backward the response to pass the score to predecessors. 
+ Functions as a relay component.""" + log.info(f"Retriever backward: {response.name}") + children_params = response.predecessors + + # is_chain = True + if response.get_gradient_and_context_text().strip() == "": + log.info(f"Retriever: Backward: No gradient found for {response}.") + + for pred in children_params: + pred.set_score(response._score) + from adalflow.utils.logger import printc + + printc( + f"Retriever: Backward: {pred.name} set_score: {response._score}, {response.name}", + "blue", + ) + if pred.param_type == ParameterType.DEMOS: + pred.add_score_to_trace( + trace_id=id, score=response._score, is_teacher=self.teacher_mode + ) diff --git a/adalflow/adalflow/core/string_parser.py b/adalflow/adalflow/core/string_parser.py index 246ec1762..3001b5129 100644 --- a/adalflow/adalflow/core/string_parser.py +++ b/adalflow/adalflow/core/string_parser.py @@ -214,6 +214,7 @@ def call(self, input: str) -> JSON_PARSER_OUTPUT_TYPE: YAML_PARSER_OUTPUT_TYPE = JSON_PARSER_OUTPUT_TYPE +# TODO: yaml parser needs to be more robust, currently json works way better than yaml class YamlParser(Parser): __doc__ = r"""To extract YAML strings from text and parse them into a YAML object. diff --git a/adalflow/adalflow/core/types.py b/adalflow/adalflow/core/types.py index 187245101..251635caf 100644 --- a/adalflow/adalflow/core/types.py +++ b/adalflow/adalflow/core/types.py @@ -58,6 +58,7 @@ class ModelType(Enum): EMBEDDER = auto() LLM = auto() RERANKER = auto() # ranking model + IMAGE_GENERATION = auto() # image generation models like DALL-E UNDEFINED = auto() diff --git a/adalflow/adalflow/eval/answer_match_acc.py b/adalflow/adalflow/eval/answer_match_acc.py index 7a9fa8f74..b45e61c14 100644 --- a/adalflow/adalflow/eval/answer_match_acc.py +++ b/adalflow/adalflow/eval/answer_match_acc.py @@ -29,8 +29,27 @@ class AnswerMatchAcc(BaseEvaluator): [1.0, 1.0, 1.0] """ - def __init__(self, type: Literal["exact_match", "fuzzy_match"] = "exact_match"): + def __init__( + self, + type: Literal[ + "exact_match", "fuzzy_match", "rouge_score", "bleu_score", "bert_score" + ] = "exact_match", + ): self.type = type + if self.type == "bert_score": + from torchmetrics.text.bert import BERTScore + + self.bertscore = BERTScore() + + elif self.type == "rouge_score": + from torchmetrics.text.rouge import ROUGEScore + + self.rougescore = ROUGEScore() + + elif self.type == "bleu_score": + from torchmetrics.text.bleu import BLEUScore + + self.bleuscore = BLEUScore() def compute_single_item( self, @@ -67,6 +86,37 @@ y = y.lower() y_gt = y_gt.lower() return 1.0 if y_gt in y else 0.0 + elif self.type == "bert_score": + from torchmetrics.text.bert import BERTScore + + self.bertscore = BERTScore() + score = self.bertscore([y], [y_gt]) + # get the data from the tensor + print(f"y: {[y]}, y_gt: {[y_gt]}, type: {type(y)}, type_gt: {type(y_gt)}") + print(score) + single_score = score["precision"].item() + return single_score + elif self.type == "rouge_score": + from torchmetrics.text.rouge import ROUGEScore + + self.rougescore = ROUGEScore() + score = self.rougescore([y], [y_gt]) + # get the data from the tensor + print(f"y: {[y]}, y_gt: {[y_gt]}, type: {type(y)}, type_gt: {type(y_gt)}") + print(score) + single_score = score["rouge1_precision"].item() + return single_score + elif self.type == "bleu_score": + from torchmetrics.text.bleu import BLEUScore + + self.bleuscore = BLEUScore() + score = self.bleuscore([y], [y_gt]) + # get the data from the tensor + print(f"y: {[y]}, y_gt: {[y_gt]}, type: {type(y)}, type_gt: 
{type(y_gt)}") + print(score) + single_score = score.item() + return single_score + else: raise NotImplementedError diff --git a/adalflow/adalflow/optim/few_shot/bootstrap_optimizer.py b/adalflow/adalflow/optim/few_shot/bootstrap_optimizer.py index c61c3649b..a78c1ec63 100644 --- a/adalflow/adalflow/optim/few_shot/bootstrap_optimizer.py +++ b/adalflow/adalflow/optim/few_shot/bootstrap_optimizer.py @@ -52,7 +52,9 @@ def __init__( for param in params if param.requires_opt and param.param_type == ParameterType.DEMOS ] - log.info(f"BootstrapFewShot: {self.params}") + log.info(f"BootstrapFewShot: {[p.name for p in self.params]}") + + print(f"BootstrapFewShot: {[p.name for p in self.params]}") self._raw_shots = raw_shots self._bootstrap_shots = bootstrap_shots @@ -64,7 +66,11 @@ def __init__( exclude_input_fields_from_bootstrap_demos ) + # TODO: use the scores from the backward engine (optionally) on the demo parameters + # needs to make a decision on which + # this score does not make sense for multiple demo parameters def add_scores(self, ids: List[str], scores: List[float], is_teacher: bool = True): + r"""Add scores for each demo via _teacher_scores or _student_scores.""" if len(ids) != len(scores): raise ValueError( f"ids and scores must have the same length, got ids: {ids}, scores: {scores}" @@ -114,10 +120,16 @@ def sample( weighted: bool = True, ): r"""Performs weighted sampling, ensure the score is in range [0, 1]. The higher score means better accuracy.""" - # 1. sample from augmented demos + # 1. sample from augmented demos (from teacher) # set weights to be score # add 1 to all score to avoid negative weights augmented_options = list(augmented_demos.values()) + + # get the teacher scores length and the augmented demos length + len_teacher_scores = len(self._teacher_scores) + len_augmented_options = len(augmented_options) + print(f"len_teacher_scores: {len_teacher_scores}") + print(f"len_augmented_options: {len_augmented_options}") weights = None if weighted: weights: List[float] = [] @@ -223,6 +235,11 @@ def propose(self): if demo_param.requires_opt: augmented_demos = demo_param._traces demos = demo_param._student_traces + + if len(augmented_demos) != len(demos): + log.warning( + f"augmented and raw demos must have the same length, got {len(augmented_demos)} and {len(demos)} \n {augmented_demos} \n and student demos {demos}" + ) try: sampled_augmented_demos, sampled_raw_demos = self.sample( augmented_demos=augmented_demos, diff --git a/adalflow/adalflow/optim/function.py b/adalflow/adalflow/optim/function.py index 6391b68e4..314fd124c 100644 --- a/adalflow/adalflow/optim/function.py +++ b/adalflow/adalflow/optim/function.py @@ -1,3 +1,5 @@ +"""Inspired by TextGrad: Automatic differentiation via "text" """ + from typing import TYPE_CHECKING, Callable diff --git a/adalflow/adalflow/optim/grad_component.py b/adalflow/adalflow/optim/grad_component.py index 016c08db8..b73e536e0 100644 --- a/adalflow/adalflow/optim/grad_component.py +++ b/adalflow/adalflow/optim/grad_component.py @@ -57,9 +57,6 @@ def forward(self, *args, **kwargs) -> "Parameter": 1. for all args and kwargs, if it is a `Parameter` object, it will be tracked as `Predecessor`. 2. Trace input_args and full_response in the parameter object. 3. Return the parameter object. - - TODO: all Gradcomponent should not allow args but only kwargs. - For now, just check if id is in kwargs. 
""" from adalflow.optim.parameter import Parameter @@ -85,9 +82,13 @@ def forward(self, *args, **kwargs) -> "Parameter": for v in input_args.values(): if isinstance(v, Parameter): predecessors.append(v) + if v.param_type == ParameterType.INPUT: + v.data_id = kwargs.get("id", None) for v in kwargs.values(): if isinstance(v, Parameter): predecessors.append(v) + if v.param_type == ParameterType.INPUT: + v.data_id = kwargs.get("id", None) # 2. unwrap the parameter object to take only the data, successor_map_fn: lambda x: x.data in default # unwrap args @@ -133,6 +134,28 @@ def forward(self, *args, **kwargs) -> "Parameter": ) return response - def backward(self, *args, **kwargs): - pass - # raise NotImplementedError("backward method is not implemented") + def backward(self, *, response: "Parameter", id: str = None, **kwargs): + """Backward pass of the function. In default, it will pass all the scores to the predecessors. + + Note: backward is mainly used internally and better to only allow kwargs as the input. + + Subclass should implement this method if you need additional backward logic. + """ + log.info(f"GradComponent backward: {response.name}") + children_params = response.predecessors + + if response.get_gradient_and_context_text().strip() == "": + log.info(f"Generator: Backward: No gradient found for {response}.") + + for pred in children_params: + pred.set_score(response._score) + from adalflow.utils.logger import printc + + printc( + f"Retriever: Backward: {pred.name} set_score: {response._score}, {response.name}", + "blue", + ) + if pred.param_type == ParameterType.DEMOS: + pred.add_score_to_trace( + trace_id=id, score=response._score, is_teacher=self.teacher_mode + ) diff --git a/adalflow/adalflow/optim/loss_component.py b/adalflow/adalflow/optim/loss_component.py index 93520de44..e53ac6092 100644 --- a/adalflow/adalflow/optim/loss_component.py +++ b/adalflow/adalflow/optim/loss_component.py @@ -10,7 +10,11 @@ class LossComponent(Component): - __doc__ = """A base class to define interfaces for an auto-grad component/operator. + __doc__ = """A base class to define a loss component. + + Loss component is to compute the textual gradients/feedback for each of its predecessors using another LLM as the backward engine. + + Each precessor should have basic information that is passed to its next component to inform its type such as retriever or generator and its role description. Compared with `Component`, `GradComponent` defines three important interfaces: - `forward`: the forward pass of the function, returns a `Parameter` object that can be traced and backpropagated. 
diff --git a/adalflow/adalflow/optim/optimizer.py b/adalflow/adalflow/optim/optimizer.py index c6fad814f..943e04e5d 100644 --- a/adalflow/adalflow/optim/optimizer.py +++ b/adalflow/adalflow/optim/optimizer.py @@ -1,12 +1,12 @@ """Base Classes for AdalFlow Optimizers, including Optimizer, TextOptimizer, and DemoOptimizer.""" -from typing_extensions import TypeAlias -from typing import Dict, Any, Union, Iterable, Sequence +from typing import Dict, Any, Union, Iterable, Sequence, List from adalflow.optim.parameter import Parameter from adalflow.core.base_data_class import DataClass -ParamsT: TypeAlias = Union[Iterable[Parameter], Iterable[Dict[str, Any]]] + +ParamsT = Union[Iterable[Parameter], Iterable[Dict[str, Any]]] class Optimizer: @@ -85,3 +85,7 @@ def config_shots(self, *args, **kwargs): def set_dataset(self, dataset: Sequence[DataClass]): r"""Set the dataset for the optimizer.""" self.dataset = dataset + + def add_scores(self, ids: List[str], scores: List[float], *args, **kwargs): + r"""Add scores to the optimizer.""" + raise NotImplementedError("add_scores method is not implemented") diff --git a/adalflow/adalflow/optim/parameter.py b/adalflow/adalflow/optim/parameter.py index 85fe25ead..5b60995c7 100644 --- a/adalflow/adalflow/optim/parameter.py +++ b/adalflow/adalflow/optim/parameter.py @@ -11,14 +11,19 @@ Optional, Literal, Callable, + TYPE_CHECKING, ) +from pyvis.network import Network from collections import defaultdict import logging +import os from dataclasses import dataclass, field import uuid from adalflow.optim.types import ParameterType from adalflow.core.base_data_class import DataClass +if TYPE_CHECKING: + from adalflow.optim.text_grad.tgd_optimizer import TGDData, TGDOptimizerTrace T = TypeVar("T") # covariant set to False to allow for in-place updates @@ -41,7 +46,39 @@ class GradientContext: ) +@dataclass +class ComponentTrace: + input_args: Dict[str, Any] = field( + metadata={"desc": "The input arguments of the GradComponent forward"}, + default=None, + ) + full_response: object = field( + metadata={"desc": "The full response of the GradComponent output"}, default=None + ) + api_kwargs: Dict[str, Any] = field( + metadata={ + "desc": "The api_kwargs for components like Generator and Retriever that pass to the model client" + }, + default=None, + ) + + +# TODO: use this to better trace the score +@dataclass +class ScoreTrace: + score: float = field(metadata={"desc": "The score of the data point"}, default=None) + eval_comp_id: str = field( + metadata={"desc": "The id of the evaluation component"}, default=None + ) + eval_comp_name: str = field( + metadata={"desc": "The name of the evaluation component"}, default=None + ) + + COMBINED_GRADIENTS_TEMPLATE = r""" +{% if combined_gradients %} +Batch size: {{ combined_gradients|length }} +{% endif %} {% for g in combined_gradients %} {% set gradient = g[0] %} {% set gradient_context = g[1] %} @@ -94,6 +131,9 @@ class Parameter(Generic[T]): name: str = None # Name of the parameter, easier to read for humans role_desc: str = "" # Description of the role of the parameter data: T = None # Data of the parameter + data_id: str = ( + None # Id of the data from the training set, used only for input_type + ) param_type: ParameterType proposing: bool = False # State of the parameter @@ -113,11 +153,15 @@ class Parameter(Generic[T]): False # Disable the backward engine for the parameter ) + component_trace: ComponentTrace = None # Trace of the component + tgd_optimizer_trace: "TGDOptimizerTrace" = None # Trace of the TGD optimizer + 
def __init__( self, *, id: Optional[str] = None, data: T = None, # for generator output, the data will be set up as raw_response + data_id: str = None, # for tracing the data item in the training/val/test set requires_opt: bool = True, role_desc: str = "", param_type: ParameterType = ParameterType.NONE, @@ -132,6 +176,7 @@ def __init__( successor_map_fn: Optional[Dict[str, Callable]] = None, ): self.id = id or str(uuid.uuid4()) + self.data_id = data_id self.name = name self.role_desc = role_desc @@ -166,10 +211,13 @@ def __init__( self.instruction_to_backward_engine: str = instruction_to_backward_engine # here are used for demo parameter, filled by generator.forward - self._traces: Dict[str, DataClass] = {} # id of the data points - self._score: float = score # end to end evaluation score - + self._traces: Dict[str, DataClass] = {} # id to data items (DynamicDataClass) self._student_traces: Dict[str, DataClass] = {} # id + + self._score: float = ( + score # end to end evaluation score, TODO: might have multiple scores if using multiple eval fns # score is set in the gradients in the backward pass + ) + self._demos: List[DataClass] = ( [] ) # used for the optimizer to save the proposed demos @@ -178,6 +226,7 @@ def __init__( self.from_response_id = from_response_id # for gradient parameter self.successor_map_fn = successor_map_fn or {} + self.component_trace = ComponentTrace() def map_to_successor(self, successor: object) -> T: """Apply the map function to the successor based on the successor's id.""" @@ -238,19 +287,48 @@ def set_peers(self, peers: List["Parameter"] = None): ) self.peers = set(peers) + ############################################################################################################# + # Trace the tgd optimizer data + ############################################################################################################ + def trace_optimizer(self, api_kwargs: Dict[str, Any], response: "TGDData"): + from adalflow.optim.text_grad.tgd_optimizer import TGDOptimizerTrace + + self.tgd_optimizer_trace = TGDOptimizerTrace( + api_kwargs=api_kwargs, output=response + ) + + ############################################################################################################ + # Trace component, include trace_forward_pass & trace_api_kwargs for now + ############################################################################################################ def trace_forward_pass(self, input_args: Dict[str, Any], full_response: object): r"""Trace the forward pass of the parameter.""" self.input_args = input_args self.full_response = full_response + # TODO: remove the input_args and full_response to use component_trace + self.component_trace.input_args = input_args + self.component_trace.full_response = full_response + + def trace_api_kwargs(self, api_kwargs: Dict[str, Any]): + r"""Trace the api_kwargs for components like Generator and Retriever that pass to the model client.""" + self.component_trace.api_kwargs = api_kwargs def set_eval_fn_input(self, eval_input: object): r"""Set the input for the eval_fn.""" self.eval_input = eval_input + ################################################################################################################### + # Used for demo optimizer (forward and backward pass) to accumlate the traces on both score and DynamicDataClass + ################################################################################################################### def set_score(self, score: float): + r"""Set the score of the parameter in the 
backward pass. + For intermediate nodes, there is only one score per eval fn behind this node. + For leaf nodes, like DEMO or PROMPT, it will have [batch_size] scores. + + But this score is only used to relay the score to the demo parameter. + """ self._score = score - def add_to_trace(self, trace: DataClass, is_teacher: bool = True): + def add_dataclass_to_trace(self, trace: DataClass, is_teacher: bool = True): r"""Called by the generator.forward to add a trace to the parameter. It is important to allow updating to the trace, as this will give different sampling weight. @@ -273,7 +351,12 @@ def add_score_to_trace(self, trace_id: str, score: float, is_teacher: bool = Tru raise ValueError( f"Trace with id {trace_id} does not exist. Current traces: {target.keys()}" ) - target[trace_id].score = score + + setattr(target[trace_id], "score", score) + + from adalflow.utils.logger import printc + + printc(f"Adding score {score} to trace {trace_id}", "magenta") ############################################################################################################ # Used for optimizer to propose new data @@ -349,10 +432,13 @@ def get_gradients_names(self) -> str: names = ", ".join(names) return names - def get_gradient_and_context_text(self) -> str: + def get_gradient_and_context_text(self, skip_correct_sample: bool = False) -> str: """Aggregates and returns: 1. the gradients 2. the context text for which the gradients are computed + + Sort the gradients from the lowest score to the highest score. + Highlight the gradients with the lowest score to the optimizer. """ from adalflow.core.prompt_builder import Prompt @@ -362,13 +448,26 @@ def get_gradient_and_context_text(self) -> str: # sort gradients by the _score from low to high self.gradients = sorted( - self.gradients, key=lambda x: x._score if x._score else 1 + self.gradients, key=lambda x: x._score if x._score is not None else 1 ) - - gradient_context_combined = zip( - self.gradients, - [self.gradients_context[g] for g in self.gradients], + # print the score for the sorted gradients + lowest_score_gradients = [] + for i, g in enumerate(self.gradients): + if skip_correct_sample: + # guard against a None score before comparing + if g._score is not None and g._score > 0.5: + continue + lowest_score_gradients.append(g) + print(f"{i} Score: {g._score} for {g.name}, {type(g._score)}") + + gradient_context_combined = list( + zip( + lowest_score_gradients, + [self.gradients_context[g] for g in lowest_score_gradients], + ) ) + # set all gradients value to None + # for g in self.gradients: + # g.data = None gradient_context_combined_str = Prompt( template=COMBINED_GRADIENTS_TEMPLATE, @@ -416,9 +515,28 @@ def build_graph(node: "Parameter"): build_graph(root) return nodes, edges + def report_cycle(cycle_nodes: List["Parameter"]): + """ + Report the detected cycle and provide guidance to the user on how to avoid it. + """ + cycle_names = [node.name for node in cycle_nodes] + log.warning(f"Cycle detected: {' -> '.join(cycle_names)}") + print(f"Cycle detected in the graph: {' -> '.join(cycle_names)}") + + # Provide guidance on how to avoid the cycle + print("To avoid the cycle, consider the following strategies:") + print("- Modify the graph structure to remove cyclic dependencies.") + print( + "- Check the relationships between these nodes to ensure no feedback loops." + ) + def backward( self, - ): # engine should be the llm or customized backwards function to pass feedback + ): + """ + Apply backward pass for all nodes in the graph by reversing the topological order. 
+ """ + # engine should be the llm or customized backwards function to pass feedback # topological sort of all the predecessors of the current parameter in the graph log.debug(f"Backward pass for {self.data}, backward function: {self.grad_fn}") @@ -441,19 +559,166 @@ def build_topo(node: Parameter): if not node.requires_opt: log.debug(f"Skipping {node.name} as it does not require optimization") continue - node.gradients = _check_and_reduce_gradients(node) log.debug(f"v: {node.data}, grad_fn: {node.grad_fn}, {node.get_grad_fn()}") if node.get_grad_fn() is not None: # gradient function takes in the engine log.debug(f"Calling gradient function for {node.name}") node.grad_fn() + # def backward( + # self, + # ): # engine should be the llm or customized backwards function to pass feedback + + # # topological sort of all the predecessors of the current parameter in the graph + # log.debug(f"Backward pass for {self.data}, backward function: {self.grad_fn}") + # topo: List[Parameter] = [] + # visited = set() + # in_stack = set() # Nodes currently being visited to detect cycles + # cycle_detected = False # Flag to check if any cycle was detected + + # def build_topo(node: Parameter, stack: Set[Parameter] = set()): + # nonlocal cycle_detected + + # if stack is None: + # stack = [] + + # # If the node is already in the stack, we have detected a cycle + # if node in in_stack: + # cycle_detected = True + # cycle_nodes = stack + [node] # The cycle includes the current path + # self.report_cycle(cycle_nodes) + # return False # Stop further processing due to cycle + # if node in visited: + # return + # visited.add(node) + # in_stack.add(node) + # stack.append(node) + # for pred in node.predecessors: + # build_topo(pred) + # topo.append(node) + # stack.pop() # Backtrack, remove the node from the current path + + # in_stack.remove(node) # Remove from the stack after processing + # return True + + # # build_topo(self) + # if not build_topo(self): + # log.error("Cycle detected, stopping backward pass.") + # return # Stop the backward pass due to cycle detection + # # backpropagation + + # self.gradients = set() + # for node in reversed(topo): + # if not node.requires_opt: + # log.debug(f"Skipping {node.name} as it does not require optimization") + # continue + # node.gradients = _check_and_reduce_gradients(node) + # log.debug(f"v: {node.data}, grad_fn: {node.grad_fn}, {node.get_grad_fn()}") + # if node.get_grad_fn() is not None: # gradient function takes in the engine + # log.debug(f"Calling gradient function for {node.name}") + # node.grad_fn() + + def draw_interactive_html_graph( + self, + filepath: Optional[str] = None, + nodes: List["Parameter"] = None, + edges: List[Tuple["Parameter", "Parameter"]] = None, + ) -> Dict[str, Any]: + """ + Generate an interactive graph with pyvis and save as an HTML file. + + Args: + nodes (list): A list of Parameter objects. + edges (list): A list of edges as tuples (source, target). + filepath (str, optional): Path to save the graph file. Defaults to None. + + Returns: + dict: A dictionary containing the graph file path. + """ + from jinja2 import Template + + # Define the output file path + output_file = "interactive_graph.html" + final_file = filepath + "_" + output_file if filepath else output_file + + # Create a pyvis Network instance + net = Network(height="750px", width="100%", directed=True) + + # Add nodes to the graph + node_ids = set() + for node in nodes: + label = ( + f"Name: {node.name}
" + f"Role: {node.role_desc.capitalize()}
" + f"Value: {node.data}
" + f"Data ID: {node.data_id}
" + ) + if node.proposing: + label += "Proposing: Yes
" + label += f"Previous Value: {node.previous_data}
" + if node.requires_opt: + label += "Requires Optimization: Yes
" + if node.param_type: + label += f"Type: {node.param_type}
" + if node.gradients: + label += f"Gradients: {node.get_gradients_names()}
" + + net.add_node( + node.id, + label=node.name, + title=label, + color="lightblue" if node.proposing else "orange", + ) + node_ids.add(node.id) + + # Add edges to the graph + for source, target in edges: + if source.id in node_ids and target.id in node_ids: + net.add_edge(source.id, target.id) + else: + print( + f"Skipping edge from {source.name} to {target.name} as one of the nodes does not exist." + ) + + # Enable physics for better layout + net.toggle_physics(True) + net.template = Template( + """ + + + + + + + +
+ + + + """ + ) + + # Save the graph as an HTML file + + net.show(final_file) + print(f"Interactive graph saved to {final_file}") + + return {"graph_path": final_file} + def draw_graph( self, add_grads: bool = True, + full_trace: bool = False, format: Literal["png", "svg"] = "png", rankdir: Literal["LR", "TB"] = "TB", filepath: Optional[str] = None, - ): + ) -> Dict[str, Any]: """Draw the graph of the parameter and its gradients. Args: @@ -461,10 +726,10 @@ def draw_graph( format (str, optional): The format of the output file. Defaults to "png". rankdir (str, optional): The direction of the graph. Defaults to "TB". filepath (str, optional): The path to save the graph. Defaults to None. + full_trace (bool, optional): Whether to include more detailed trace such as api_kwargs. Defaults to False. """ from adalflow.utils import save_json from adalflow.utils.global_config import get_adalflow_default_root_path - import os try: from graphviz import Digraph @@ -538,6 +803,8 @@ def wrap_and_escape(text, width=40): f"Role: {wrap_and_escape(n.role_desc.capitalize())}" f"Value: {wrap_and_escape(n.data)}" ) + if n.data_id is not None: + node_label += f"Data ID: {wrap_and_escape(n.data_id)}" if n.proposing: node_label += f"Proposing{{'Yes'}}" node_label += f"Previous Value: {wrap_and_escape(n.previous_data)}" @@ -545,6 +812,12 @@ def wrap_and_escape(text, width=40): node_label += f"Requires Optimization: {{'Yes'}}" if n.param_type: node_label += f"Type: {wrap_and_escape(n.param_type.name)}" + if full_trace and n.component_trace.api_kwargs is not None: + node_label += f" API kwargs: {wrap_and_escape(str(n.component_trace.api_kwargs))}" + + # show the score for intermediate nodes + if n._score is not None and len(n.predecessors) > 0: + node_label += f"Score: {str(n._score)}" if add_grads: node_label += f"Gradients: {wrap_and_escape(n.get_gradients_names())}" # add a list of each gradient with short value @@ -562,6 +835,8 @@ def wrap_and_escape(text, width=40): if len(n._traces.values()) > 0: node_label += f"Traces: keys: {wrap_and_escape(str(n._traces.keys()))}" node_label += f"Traces: values: {wrap_and_escape(str(n._traces.values()))}" + if n.tgd_optimizer_trace is not None: + node_label += f"TGD Optimizer Trace: {wrap_and_escape(str(n.tgd_optimizer_trace))}" node_label += "" # check if the name exists in dot @@ -591,6 +866,7 @@ def wrap_and_escape(text, width=40): # raise ImportError( # "Please install matplotlib using 'pip install matplotlib' to use this feature" # ) from e + # ) from e # from io import BytesIO # import numpy as np @@ -623,11 +899,17 @@ def wrap_and_escape(text, width=40): # save_json(prompts, filename) # save root node to_dict to json save_json(self.to_dict(), f"{filepath}_root.json") - return dot + + # draw interactive graph + self.draw_interactive_html_graph( + filepath=filepath, nodes=[n for n in nodes], edges=edges + ) + return {"graph_path": filepath, "root_path": f"{filepath}_root.json"} def to_dict(self): return { "name": self.name, + "id": self.id, "role_desc": self.role_desc, "data": str(self.data), "requires_opt": self.requires_opt, @@ -682,20 +964,3 @@ def from_dict(cls, data: dict): def __repr__(self): return f"Parameter(name={self.name}, requires_opt={self.requires_opt}, param_type={self.param_type}, role_desc={self.role_desc}, data={self.data}, predecessors={self.predecessors}, gradients={self.gradients},\ raw_response={self.raw_response}, input_args={self.input_args}, traces={self._traces})" - - -def _check_and_reduce_gradients(variable: Parameter) -> Set[Parameter]: - - 
if variable.get_gradient_and_context_text() == "": - log.debug(f"No gradients detected for {variable.data}") - return variable.gradients - if len(variable.gradients) == 1: - log.debug(f"Only one gradient, no need to reduce: {variable.gradients}") - return variable.gradients - else: - log.debug( - f"Multiple gradients detected for {variable.data}. But we are not reducing them." - ) - return variable.gradients - - # TODO: Implement the reduction logic later diff --git a/adalflow/adalflow/optim/text_grad/backend_engine_prompt.py b/adalflow/adalflow/optim/text_grad/backend_engine_prompt.py index e3b608625..a5f3ddb13 100644 --- a/adalflow/adalflow/optim/text_grad/backend_engine_prompt.py +++ b/adalflow/adalflow/optim/text_grad/backend_engine_prompt.py @@ -11,31 +11,96 @@ FEEDBACK_ENGINE_TEMPLATE = r""" You are the feedback engine in an optimization system. -Your role: Provide intelligent and creative feedback for the variable enclosed in <VARIABLE> tags, based on the objective specified in <OBJECTIVE_FUNCTION> tags. +Your task is to provide intelligent and creative feedback for the target variable enclosed in <VARIABLE> tags, +so that the optimizer can optimize this variable to improve the objective enclosed in <OBJECTIVE_FUNCTION> tags. + 1. Focus on the downstream OBJECTIVE without proposing new versions of the variable. 2. Feedback examples: "Since language models have the X failure mode...", "Adding X can fix this error because...", "Removing X can improve the objective function because...", "Changing X to Y would fix the mistake..." 3. Consider the variable in the context of its peers if provided. + Remember: -Be concise, critical, and direct. +Be specific, concise, critical, and direct. {{conversation_sec}} {{objective_instruction_sec}} """ +############################################## +# Loss Component +############################################## + + +# Objective instruction for LLM as gradComponent with user custom instruction + +# OBJECTIVE_INSTRUCTION_BASE = r""" +# Our only goal is to improve the above metric, and nothing else. +# {% if instruction_to_backward_engine %} +# Note: {{instruction_to_backward_engine}} +# {% endif %} +# """ + +OBJECTIVE_INSTRUCTION_BASE = r""" +Your only goal is to clearly state how it obtained the "<OBJECTIVE_FUNCTION>". +Especially when the score is low. +Be CONCISE. +Be specific on why it has a low score. +e.g. "The retrieved context is not enough to answer the question so the problem lies in the retrieval part." +""" + + +### Variable to get feedback on, often it is pred in the loss component +LOSS_CONVERSATION_START_INSTRUCTION_STRING_FN = r""" +TARGET VARIABLE: +<NAME> {{variable_name}} </NAME> +<ROLE> {{variable_desc}} </ROLE> +<VARIABLE> {{variable_value}} </VARIABLE> +{{conversation_str}} +""" + +### Loss/Score Information ### +LOSS_CONVERSATION_TEMPLATE_STRING = r""" +The variable is passed to the eval function and compared with a target/ground truth value. + +<EVAL_FUNC_DESCRIPTION>: {{eval_fn_desc}} +<INPUTS>: {{input_str}} +<OUTPUTS/SCORE>: {{response_value}} +{% if metadata %} +Note: {{metadata}} +{% endif %}""" + + +############################################## +# LLM as gradComponent +############################################## +# When the parameter has a gradient, it is the continuation of the backpropagation chain, a layer in the models +CONVERSATION_START_INSTRUCTION_CHAIN = r""" +{{variable_and_peers_info}} + +Here is a conversation with the language model (LM): +{{conversation_str}} +""" + +OBJECTIVE_INSTRUCTION_CHAIN = r""" +This conversation is part of a larger system. The <LM_OUTPUT> was later used as {{response_desc}}. 
+ +Your goal is to give feedback to the variable to guide the LLM_OUTPUT according to feedback: {{response_gradient}} +{% if instruction_to_backward_engine %} +Note: {{instruction_to_backward_engine}} +{% endif %} +""" ### Backward engine: user prompt # First part to provide context of LLM as gradComponent +# The target variable is used as either input or a task instruction to a language model (LM): +# replace the "The target variable is used as either input or a task instruction to a language model (LM):" with the {{variable_desc}} +# NAME: {{variable_name}} +# Description: {{variable_desc}} LLM_CONVERSATION_TEMPLATE = r""" -NAME: {{variable_name}} -The target variable is used as either input or a task instruction to a language model (LM): - LM_INPUT: {{input_value}} LM_OUTPUT: {{llm_output}}""" -# only passing variable (dict) and peers as parameters -# shared between the VARIABLE_AND_PEERS_INFO = r""" {{variable.name}} @@ -62,37 +127,11 @@ {% endif %} """ -# When the parameter has no gradient, it is the start of the backpropagation chain, used as a loss function -CONVERSATION_START_INSTRUCTION_BASE = r""" -{{variable_and_peers_info}} - -Here is an evaluation of the variable using a language model: -{{conversation_str}} -""" -# When the parameter has a gradient, it is the continuation of the backpropagation chain, a layer in the models -CONVERSATION_START_INSTRUCTION_CHAIN = r""" -{{variable_and_peers_info}} - -Here is a conversation with a language model (LM): -{{conversation_str}} -""" - -# Objective instruction for LLM as gradComponent with user custom instruction +# # When the parameter has no gradient, it is the start of the backpropagation chain, used as a loss function +# CONVERSATION_START_INSTRUCTION_BASE = r""" +# {{variable_and_peers_info}} -OBJECTIVE_INSTRUCTION_BASE = r""" -Our only goal is to improve the above metric, and nothing else. -{% if instruction_to_backward_engine %} -Note: {{instruction_to_backward_engine}} -{% endif %} -""" - - -OBJECTIVE_INSTRUCTION_CHAIN = r""" -This conversation is part of a larger system. The was later used as {{response_desc}}. - -Your goal is to give feedback to the variable with the LLM_OUTPUT: {{response_gradient}} -{% if instruction_to_backward_engine %} -Note: {{instruction_to_backward_engine}} -{% endif %} -""" +# Here is an evaluation of the variable using a language model: +# {{conversation_str}} +# """ diff --git a/adalflow/adalflow/optim/text_grad/llm_text_loss.py b/adalflow/adalflow/optim/text_grad/llm_text_loss.py index 46cf7a0f6..d34373e1f 100644 --- a/adalflow/adalflow/optim/text_grad/llm_text_loss.py +++ b/adalflow/adalflow/optim/text_grad/llm_text_loss.py @@ -1,4 +1,6 @@ -"""Implementation of TextGrad: Automatic “Differentiation” via Text""" +"""Implementation of TextGrad: Automatic “Differentiation” via Text. +This code is not used as we treat the non-optimizable version of LLM judge as a form of eval_fn. +We use class EvalFnToTextLoss instead as of today 12/9/2024""" from typing import Union, TYPE_CHECKING diff --git a/adalflow/adalflow/optim/text_grad/ops.py b/adalflow/adalflow/optim/text_grad/ops.py index da2b438fe..ddce60dcc 100644 --- a/adalflow/adalflow/optim/text_grad/ops.py +++ b/adalflow/adalflow/optim/text_grad/ops.py @@ -76,8 +76,11 @@ def forward(self, params: List[Parameter]) -> Parameter: def backward(self, summation: Parameter): """ - Performs the backward pass of the sum operation. 
- This is simply an idempotent operation, where we make a gradient with the combined feedback and add it to the predecessors'grads. + Computes gradients for the predecessors of the sum operation. + There is no gradient computation for the sum operation itself. + It is a simple way to combine multiple losses for convenience. + + sum.backward() => [loss1.backward(), loss2.backward(), ...] :param summation: The parameter representing the sum. :type summation: Parameter diff --git a/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py b/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py index c8654d4a0..89ebd471a 100644 --- a/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py +++ b/adalflow/adalflow/optim/text_grad/text_loss_with_eval_fn.py @@ -16,20 +16,25 @@ from adalflow.core.prompt_builder import Prompt from adalflow.eval.base import BaseEvaluator +from adalflow.optim.text_grad.backend_engine_prompt import ( + LOSS_CONVERSATION_TEMPLATE_STRING, + LOSS_CONVERSATION_START_INSTRUCTION_STRING_FN, + OBJECTIVE_INSTRUCTION_BASE, +) log = logging.getLogger(__name__) ### Loss/Score Information ### -CONVERSATION_TEMPLATE_STRING = r""" -The variable is passed to the eval function and compared with a target/ground truth value. +# LOSS_CONVERSATION_TEMPLATE_STRING = r""" +# The variable is passed to the eval function and compared with a target/ground truth value. -: {{eval_fn_desc}} -: {{input_str}} -: {{response_value}} -{% if metadata %} -Note: {{metadata}} -{% endif %}""" +# : {{eval_fn_desc}} +# : {{input_str}} +# : {{response_value}} +# {% if metadata %} +# Note: {{metadata}} +# {% endif %}""" # Does not have gradient on the output, the loss function of the backpropagation chain @@ -41,22 +46,22 @@ # Has the gradient on the output, the layer in the backpropagation chain # Conversation will be provided differently. -### Variable Information ### -CONVERSATION_START_INSTRUCTION_STRING_FN = r""" -TARGET VARIABLE: -: {{variable_name}} - {{variable_desc}} - {{variable_value}} -{{conversation_str}} -""" +# ### Variable Information ### +# CONVERSATION_START_INSTRUCTION_STRING_FN = r""" +# TARGET VARIABLE: +# {{variable_name}} +# {{variable_desc}} +# {{variable_value}} +# {{conversation_str}} +# """ # Third part of the user prompt -OBJECTIVE_INSTRUCTION_BASE = r""" -Your only goal is to clearly states how it obtained the "". -Especially when the score is low. -Be CONCISE. -If you have enough context, add a more specific feedback on how it failed. -""" +# OBJECTIVE_INSTRUCTION_BASE = r""" +# Your only goal is to clearly states how it obtained the "". +# Especially when the score is low. +# Be CONCISE. +# If you have enough context, add a more specific feedback on how it failed. +# """ OBJECTIVE_INSTRUCTION_CHAIN = r"""This conversation is part of a larger system. The was later used as "{{response_name}}: {{response_desc}}". @@ -206,7 +211,7 @@ def _backward_through_one_predecessor( response: Parameter, eval_fn_desc: str, backward_engine: "BackwardEngine", - is_chain: bool = False, + is_intermediate_node: bool = False, # if the node is an intermediate node in the backpropagation chain metadata: Dict[str, str] = None, ): if not pred.requires_opt: @@ -214,7 +219,9 @@ def _backward_through_one_predecessor( f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization." 
) return - log.debug(f"EvalFnToTextLoss: Backward through {pred}, is_chain: {is_chain}") + log.debug( + f"EvalFnToTextLoss: Backward through {pred}, is_intermediate_node: {is_intermediate_node}" + ) if pred.check_if_already_computed_gradient_respect_to(response.id): log.info( @@ -237,7 +244,7 @@ def _backward_through_one_predecessor( # response information conversation_str = Prompt( - CONVERSATION_TEMPLATE_STRING, + LOSS_CONVERSATION_TEMPLATE_STRING, prompt_kwargs={ "input_str": inputs_string, "eval_fn_desc": eval_fn_desc, @@ -246,10 +253,10 @@ def _backward_through_one_predecessor( }, )() - conv_ins_template = CONVERSATION_START_INSTRUCTION_STRING_FN + conv_ins_template = LOSS_CONVERSATION_START_INSTRUCTION_STRING_FN obj_ins_template = OBJECTIVE_INSTRUCTION_BASE - if is_chain: + if is_intermediate_node: # conv_ins_template = CONVERSATION_START_INSTRUCTION_STRING_FN_CHAIN obj_ins_template = OBJECTIVE_INSTRUCTION_CHAIN @@ -315,6 +322,7 @@ def _backward_through_one_predecessor( ) # backward the end to end score + # TODO: not really useful pred.set_score(response.data) print(f"setting pred name {pred.name} score to {response.data}") @@ -335,11 +343,11 @@ def backward( """ log.info(f"EvalFnToTextLoss: Backward: {response}") children_params = response.predecessors - is_chain = True + is_intermediate_node = False response_gradient_context = response.get_gradient_and_context_text().strip() - if response_gradient_context == "": - log.info(f"EvalFnToTextLoss: Backward: No gradient found for {response}.") - is_chain = False + if response_gradient_context != "": + log.info("EvalFnToTextLoss is an intermediate node.") + is_intermediate_node = True log.info(f"response_gradient_context: {response_gradient_context}") # go through all child parameters @@ -364,17 +372,27 @@ def backward( response, eval_fn_desc, backward_engine, - is_chain, + is_intermediate_node, metadata, ) # backward for the score for the demo for pred in children_params: - if not pred.requires_opt: - log.debug( - f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization." + # if not pred.requires_opt: + # log.debug( + # f"EvalFnToTextLoss: Skipping {pred} as it does not require optimization." + # ) + # continue + if not isinstance(response.data, float): + raise TypeError( + f"EvalFnToTextLoss: response.data must be a float. Got {type(response.data)}." 
) - continue - pred._score = float(response.data) + pred._score = response.data + from adalflow.utils.logger import printc + + printc( + f"EvalFnToTextLoss: {pred.name} set_score: {response.data}, {response.name}", + "blue", + ) log.info(f"setting pred name {pred.name} score to {response.data}") diff --git a/adalflow/adalflow/optim/text_grad/tgd_optimizer.py b/adalflow/adalflow/optim/text_grad/tgd_optimizer.py index f2d5b918b..219c299a5 100644 --- a/adalflow/adalflow/optim/text_grad/tgd_optimizer.py +++ b/adalflow/adalflow/optim/text_grad/tgd_optimizer.py @@ -6,18 +6,19 @@ Source code: https://github.com/google-deepmind/opro """ -from typing import List, Dict, TYPE_CHECKING, Optional +from typing import List, Dict, TYPE_CHECKING, Optional, Any from collections import defaultdict import logging import re from dataclasses import field, dataclass - from adalflow.optim.optimizer import TextOptimizer, ParamsT from adalflow.optim.text_grad.backend_engine_prompt import VARIABLE_AND_PEERS_INFO from adalflow.optim.parameter import Parameter from adalflow.core.base_data_class import DataClass +from adalflow.tracing.decorators import trace_generator_states + if TYPE_CHECKING: from adalflow.core import ModelClient @@ -26,34 +27,6 @@ log = logging.getLogger(__name__) -# Tips: -# 1. Eliminate unnecessary words or phrases. -# 2. Add new elements to address specific feedback. -# 3. Be creative and present the variable differently. -OPTIMIZER_SYSTEM_PROMPT = r""" -You are part of an optimization system that refines existing variable values based on feedback. - -Your task: Propose a new variable value in response to the feedback. -1. Address the concerns raised in the feedback while preserving positive aspects. -2. Observe past performance patterns when provided and to keep the good quality. -3. Consider the variable in the context of its peers if provided. - FYI: - - If a peer will be optimized itself, do not overlap with its scope. - - Otherwise, you can overlap if it is necessary to address the feedback. - -Output: -Provide only the new variable value between {{new_variable_start_tag}} and {{new_variable_end_tag}} tags. - -Tips: -1. Eliminate unnecessary words or phrases. -2. Add new elements to address specific feedback. -3. Be creative and present the variable differently. -{% if instruction_to_optimizer %} -4. {{instruction_to_optimizer}} -{% endif %} -""" - - @dataclass class HistoryPrompt(DataClass): id: str @@ -61,16 +34,24 @@ class HistoryPrompt(DataClass): eval_score: float +#################################################################################################### +# Textual Gradient Descent Optimizer +#################################################################################################### +# {% if failed_proposals %} +# Here are the past failed proposals: +# {% for failed_proposal in failed_proposals %} +# {{loop.index}}. {{failed_proposal}} +# {% endfor %} +# {% endif %} TEXT_GRAD_DESC_TEMPLATE = r""" {{optimizer_system_prompt}} -{#Variable and feedback#} -{{variable_and_peers_info}} -{# ORPO past history #} + +{# OPRO past history #} {% if past_history %} -Here are the past iterations of this variable along with the validation score. +Here are the best past iterations of this variable along with the validation score. {% for history in past_history %} {{loop.index}}. 
{{history}} {% endfor %} @@ -100,9 +81,41 @@ class HistoryPrompt(DataClass): You must base your changes on the following examples when modifying the {{variable_desc}}: {{in_context_examples}} {% endif %} +YOU MUST ENSURE the new variable shares the same intent as the original variable. +You can either rephrase the initial variable, or add more specific instructions based on the feedback. +You cannot change the variable to fit only one sample if the batch size is larger than 1. """ +# optimizer system prompt + +# Tips: +# 1. Eliminate unnecessary words or phrases. +# 2. Add new elements to address specific feedback. +# 3. Be creative and present the variable differently. +# Provide only the new variable value between {{new_variable_start_tag}} and {{new_variable_end_tag}} tags. +OPTIMIZER_SYSTEM_PROMPT = r""" +You are part of an optimization system that refines an existing variable based on feedback generated on a batch of input data. + +1. Address the concerns raised in the feedback while preserving positive aspects. +2. Observe past performance patterns when provided and keep the good quality. +3. Consider the variable in the context of its peers if provided. + FYI: + - If a peer will be optimized itself, do not overlap with its scope. + - Otherwise, you can overlap if it is necessary to address the feedback. + +{{output_format_str}} + + +Tips: +1. Eliminate unnecessary words or phrases. +2. Add new elements to address specific feedback. +3. Be creative and present the variable differently. +{% if instruction_to_optimizer %} +4. {{instruction_to_optimizer}} +{% endif %} +""" + @dataclass class Instruction(DataClass): @@ -119,6 +132,25 @@ class Instruction(DataClass): ) + +@dataclass +class TGDData(DataClass): + reasoning: str = field(metadata={"desc": "Why the variable is proposed this way"}) + proposed_variable: str = field(metadata={"desc": "The proposed variable"}) + + +@dataclass +class TGDOptimizerTrace: + api_kwargs: Dict[str, Any] = field( + metadata={ + "desc": "The api_kwargs for components like Generator and Retriever that pass to the model client" + }, + default=None, + ) + output: TGDData = field( + metadata={"desc": "The output of the TGD optimizer"}, default=None + ) + + new_variable_tags = ["<VARIABLE>", "</VARIABLE>"] @@ -134,6 +166,7 @@ def extract_new_variable(text: str) -> str: return matches[0].strip() +@trace_generator_states() class TGDOptimizer(TextOptimizer): __doc__ = """Textual Gradient Descent (LLM) optimizer for text-based variables.""" @@ -141,6 +174,7 @@ class TGDOptimizer(TextOptimizer): params: ParamsT constraints: List[str] params_history: Dict[str, List[HistoryPrompt]] = {} # id to history + # failed_proposals: Dict[str, List[HistoryPrompt]] = {} # only need the value def __init__( self, @@ -153,18 +187,25 @@ def __init__( in_context_examples: List[str] = None, # TODO: in-context examples num_gradient_memory: int = 0, # TODO: gradient memory and momentum, for now it is not useful max_past_history: int = 3, + # max_failed_proposals: int = 3, ): from adalflow.core.generator import Generator from adalflow.core import Prompt + from adalflow.components.output_parsers.dataclass_parser import DataClassParser super().__init__() self.params = params self.constraints = constraints or [] + self.data_class = TGDData + self.output_parser = DataClassParser( + data_class=self.data_class, return_data_class=True, format_type="json" + ) self.optimizer_system_prompt = Prompt( template=optimizer_system_prompt, prompt_kwargs={ - "new_variable_start_tag": new_variable_tags[0], - "new_variable_end_tag": 
new_variable_tags[1], + # "new_variable_start_tag": new_variable_tags[0], + # "new_variable_end_tag": new_variable_tags[1], + "output_format_str": self.output_parser.get_output_format_str(), }, ) self.variable_and_peers_info = Prompt( @@ -177,17 +218,21 @@ def __init__( self.num_gradient_memory = num_gradient_memory self.gradient_memory_dict = defaultdict(list) # id to num_gradient_memory self.do_gradient_memory = self.num_gradient_memory > 0 + self.llm_optimizer = Generator( model_client=model_client, model_kwargs=model_kwargs, template=TEXT_GRAD_DESC_TEMPLATE, + output_processors=self.output_parser, ) self.max_past_history = max_past_history + # self.max_failed_proposals = max_failed_proposals # initate the past history for each parameter for param in self.params: self.params_history[param.id] = [] + # self.failed_proposals[param.id] = [] @property def constraint_text(self): @@ -242,6 +287,40 @@ def render_history(self, param_id: str) -> List[str]: history.to_yaml(exclude=["id"]) for history in self.params_history[param_id] ] + # def add_failed_proposal(self): + # """Save a copy of the current value of the parameter in the failed proposals.""" + # for param in self.params: + # failed_proposal = HistoryPrompt( + # id=param.id, + # value=param.data, + # eval_score=None, + # ) + # self.failed_proposals[param.id].append(failed_proposal) + # if len(self.failed_proposals[param.id]) > self.max_failed_proposals: + # for _ in range( + # len(self.failed_proposals[param.id]) - self.max_failed_proposals + # ): + # self.failed_proposals[param.id].pop() + # # if param_id not in self.failed_proposals: + # # self.failed_proposals[param_id] = [] + # # failed_proposal = HistoryPrompt( + # # id=param_id, + # # value=value, + # # eval_score=None, + # # ) + # # self.failed_proposals[param_id].append(failed_proposal) + # # if len(self.failed_proposals[param_id]) > self.max_failed_proposals: + # # for _ in range(len(self.failed_proposals[param_id]) - self.max_failed_proposals): + # # self.failed_proposals[param_id].pop() + + # def render_failed_proposals(self, param_id: str) -> List[str]: + # if param_id not in self.failed_proposals: + # return [] + # return [ + # history.to_yaml(exclude=["id", "eval_score"]) + # for history in self.failed_proposals[param_id] + # ] + # TODO: optimize with adalflow template for better readability def get_gradient_memory_text(self, param: Parameter) -> str: grad_memory = "" @@ -260,7 +339,9 @@ def _get_user_prompt_kwargs(self, param: Parameter) -> Dict[str, str]: user_prompt_kwargs = { "variable_and_peers_info": variable_and_peer_info, - "variable_grad": param.get_gradient_and_context_text(), + "variable_grad": param.get_gradient_and_context_text( + skip_correct_sample=True + ), # constraints "constraint_text": self.constraint_text if self.do_constrained else None, # in-context examples @@ -279,6 +360,12 @@ def _get_user_prompt_kwargs(self, param: Parameter) -> Dict[str, str]: "past_history": ( self.render_history(param.id) if self.max_past_history else None ), + # failed proposals + # "failed_proposals": ( + # self.render_failed_proposals(param.id) + # if self.max_failed_proposals + # else None + # ), } return user_prompt_kwargs @@ -286,7 +373,7 @@ def _get_user_prompt_kwargs(self, param: Parameter) -> Dict[str, str]: # TODO: better way to update the gradient memory def update_gradient_memory(self, param: Parameter): self.gradient_memory_dict[param.id].append( - {"value": param.get_gradient_and_context_text()} + {"value": 
param.get_gradient_and_context_text(skip_correct_sample=True)} ) def zero_grad(self): @@ -299,6 +386,8 @@ def propose(self): if self.proposing: raise ValueError("Already proposing a value.") + print("Proposing a new value.") + # no cache so that new proposal can be made no_cache = True # print("Proposing a new value.") @@ -327,12 +416,22 @@ def propose(self): ) prompt_str = self.llm_optimizer.get_prompt(**prompt_kwargs) log.debug(f"TGD LLM optimizer prompt: {prompt_str}") - proposed_data = response.data + proposed_data: TGDData = ( + response.data + if response.data + else TGDData( + reasoning="No reasoning", proposed_variable=response.raw_response + ) + ) log.info(f"Response from the optimizer: {response}") # extract the improved variable from the response # TODO: make it more robust - improved_variable = extract_new_variable(proposed_data) + # improved_variable = extract_new_variable(proposed_data) + improved_variable = proposed_data.proposed_variable param.propose_data(improved_variable) + param.trace_optimizer(api_kwargs=prompt_str, response=response) + print(f"prompt_str: {prompt_str}") + print(f"response: {response}") if self.do_gradient_memory: self.update_gradient_memory(param) self.proposing = True @@ -345,6 +444,7 @@ def revert(self): if not param.requires_opt: continue param.revert_data() + param.trace_optimizer(api_kwargs=None, response=None) self.proposing = False def step(self): diff --git a/adalflow/adalflow/optim/trainer/adal.py b/adalflow/adalflow/optim/trainer/adal.py index f9bcfc108..cea317605 100644 --- a/adalflow/adalflow/optim/trainer/adal.py +++ b/adalflow/adalflow/optim/trainer/adal.py @@ -249,13 +249,18 @@ def evaluate_samples( ) for future in concurrent.futures.as_completed(futures): - i = futures[future] - acc_list[i] = ( - future.result() - ) # Place the result in the correct position - progress_bar.update( - 1 - ) # Update progress bar after each result is collected + try: + i = futures[future] + acc_list[i] = ( + future.result() + ) # Place the result in the correct position + progress_bar.update( + 1 + ) # Update progress bar after each result is collected + except Exception as e: + + progress_bar.close() + raise ValueError(f"Exception in task {i}: {e}") avg_score = float(np.mean(np.array(acc_list))) return EvaluationResult(avg_score=avg_score, per_item_scores=acc_list) @@ -394,6 +399,11 @@ def train_step(self, batch, batch_idx, num_workers: int = 2) -> List: samples[i] = sample # Keep the sample order aligned # check the ordering + if isinstance(y_pred, Parameter): + raise ValueError(f"y_pred_{i} is a Parameter, {y_pred}") + + print(f"y_pred: {y_pred})") + assert ( y_pred.id == sample.id ), f"ID mismatch: {y_pred.id} != {sample.id}, type: {type(y_pred)}" @@ -469,14 +479,13 @@ def validation_step(self, batch, batch_idx, num_workers: int = 2) -> List: """ # TODO: let use decide which mode to be self.task.eval() + self.task.use_teacher(mode=False) # ensure the teacher is not used completed_y_preds, completed_samples, index_to_score = self.pred_step( batch, batch_idx, num_workers, running_eval=True, min_score=minimum_score ) if index_to_score: # compute score from index_to_score - print( - f"completed_samples: {len(completed_samples)}, len: {len(list(index_to_score.values()))}" - ) + avg_score = np.mean(list(index_to_score.values())).item() acc_list = [None] * len(index_to_score) for i, score in index_to_score.items(): @@ -598,7 +607,9 @@ def configure_backward_engine_helper( if self.loss_fn: self.loss_fn.set_backward_engine(self.backward_engine) - def 
configure_callbacks(self, save_dir: Optional[str] = "traces", *args, **kwargs): + def configure_callbacks( + self, save_dir: Optional[str] = "traces", *args, **kwargs + ) -> List[str]: """In default we config the failure generator callback. User can overwrite this method to add more callbacks.""" from adalflow.utils.global_config import get_adalflow_default_root_path import os @@ -606,7 +617,7 @@ def configure_callbacks(self, save_dir: Optional[str] = "traces", *args, **kwarg if not save_dir: save_dir = "traces" save_dir = os.path.join(get_adalflow_default_root_path(), save_dir) - print(f"Saving traces to {save_dir}") + log.debug(f"Saving traces to {save_dir}") return self._auto_generator_callbacks(save_dir) def run_one_task_sample(self, sample: Any) -> Any: @@ -640,9 +651,10 @@ def _find_all_generators(self) -> List[Tuple[str, "Generator"]]: for name, comp in self.task.named_components(): if isinstance(comp, Generator): all_generators.append((name, comp)) + log.debug(f"all_generators: {all_generators}") return all_generators - def _auto_generator_callbacks(self, save_dir: str = "traces"): + def _auto_generator_callbacks(self, save_dir: str = "traces") -> List[str]: r"""Automatically generate callbacks.""" from adalflow.core.types import GeneratorOutput from adalflow.tracing.generator_call_logger import ( @@ -652,7 +664,7 @@ def _auto_generator_callbacks(self, save_dir: str = "traces"): all_generators = self._find_all_generators() - print(f"all_generators: {all_generators}") + log.debug(f"all_generators: {all_generators}") def _on_completion_callback( output: GeneratorOutput, @@ -672,9 +684,10 @@ def _on_completion_callback( # Register the callback for each generator file_paths = [] + call_logger = GeneratorCallLogger(save_dir=save_dir) for name, generator in all_generators: - call_logger = GeneratorCallLogger(save_dir=save_dir) - call_logger.reset() + + # call_logger.reset() call_logger.register_generator(name) logger_call = partial(call_logger.log_call, name) generator.register_callback( @@ -682,10 +695,7 @@ def _on_completion_callback( ) file_path = call_logger.get_log_location(name) file_paths.append(file_path) - print( - f"Registered callback for {name}, file path: {file_path}", - end="\n", - ) + log.debug(f"Registered callback for {name}, file path: {file_path}") return file_paths def configure_demo_optimizer_helper(self) -> List[DemoOptimizer]: diff --git a/adalflow/adalflow/optim/trainer/trainer.py b/adalflow/adalflow/optim/trainer/trainer.py index ae17b064a..91a2fd167 100644 --- a/adalflow/adalflow/optim/trainer/trainer.py +++ b/adalflow/adalflow/optim/trainer/trainer.py @@ -27,11 +27,11 @@ from adalflow.utils import save_json, load_json from adalflow.utils.cache import hash_text_sha1 from adalflow.utils.data import DataLoader - +from adalflow.utils.logger import printc from adalflow.optim.types import TrainerValidateStats -log = logging.getLogger(__name__) +logger = logging.getLogger(__name__) class Trainer(Component): @@ -91,9 +91,10 @@ class Trainer(Component): batch_val_score_threshold: Optional[float] = ( 1.0 # when acc_score >= this threshold, skip this batch ) - max_error_samples: Optional[int] = 8 - max_correct_samples: Optional[int] = 8 + max_error_samples: Optional[int] = 2 + max_correct_samples: Optional[int] = 2 debug: bool = False + sequential_order: List[str] = ["text", "demo"] def __init__( self, @@ -105,8 +106,8 @@ def __init__( num_workers: int = 4, ckpt_path: str = None, batch_val_score_threshold: Optional[float] = 1.0, - max_error_samples: Optional[int] = 4, - 
max_correct_samples: Optional[int] = 4, + max_error_samples: Optional[int] = 2, + max_correct_samples: Optional[int] = 2, max_proposals_per_step: int = 5, train_loader: Optional[Any] = None, train_dataset: Optional[Any] = None, @@ -119,6 +120,7 @@ def __init__( exclude_input_fields_from_bootstrap_demos: bool = False, debug: bool = False, save_traces: bool = False, # save traces in the few-shto demos + sequential_order: List[str] = ["text", "demo"], *args, **kwargs, ) -> None: @@ -161,6 +163,7 @@ def __init__( self.exclude_input_fields_from_bootstrap_demos = ( exclude_input_fields_from_bootstrap_demos ) + self.sequential_order = sequential_order # TODO: need to support checkpoint resume too! def diagnose(self, dataset: Any, split: str = "train"): @@ -188,7 +191,8 @@ def diagnose(self, dataset: Any, split: str = "train"): trainer_state = self.gather_trainer_states() self.prep_ckpt_file_path(trainer_state) save_path = os.path.join(self.ckpt_path, f"diagnose_{split}") - print(f"Save diagnose to {save_path}") + logger.debug(f"Save diagnose to {save_path}") + # One generator will be one file, all stats are in logger_metadata.json log_paths = self.adaltask.configure_callbacks(save_dir=save_path) # 2. evaluate acc = self.adaltask.validation_step(dataset, 0, self.num_workers) @@ -206,15 +210,18 @@ def diagnose(self, dataset: Any, split: str = "train"): raise ValueError( "dataset should have an attribute id for tracking the samples" ) - print(f"sorted_indices: {sorted_indices}") + logger.debug(f"sorted_indices: {sorted_indices}") + sorted_scores = [acc_per_item_scores[i] for i in sorted_indices] - print(f"sorted_scores: {sorted_scores}") + logger.debug(f"sorted_scores: {sorted_scores}") sorted_dataset = [dataset[i] for i in sorted_indices] + paths: Dict[str, List[str]] = {"Log": log_paths, "Diagnose": [], "Stats": []} + # reorder the samples based on the score for log_path in log_paths: file_name = os.path.basename(log_path) - print(f"Loading log file: {file_name}") + logger.debug(f"Loading log file: {file_name}") logs = load_jsonl(log_path) try: logs_dict = {log["output"]["id"]: log for log in logs} @@ -232,6 +239,7 @@ def diagnose(self, dataset: Any, split: str = "train"): diagnose_file = os.path.join(log_dir, diagnose_filename) diagnose_items = [] + stats_list: List[Dict] = [] for i, log in enumerate(sorted_logs): if log["score"] < 0.5: diagnose_item = { @@ -252,16 +260,68 @@ def diagnose(self, dataset: Any, split: str = "train"): "total_error_samples": len(diagnose_items), "avg_score": acc_score, } - save_json(stats, os.path.join(log_dir, "stats.json")) - print(f"Total error samples: {len(diagnose_items)}") - print(f"Saved diagnose to {diagnose_file}") + stat_path = os.path.join(log_dir, "stats.json") + save_json(stats, stat_path) + logger.debug(f"Total error samples: {len(diagnose_items)}") + logger.debug(f"Saved diagnose to {diagnose_file}") + paths["Diagnose"].append(diagnose_file) + paths["Stats"].append(stat_path) + stats_list.append(stats) + + self.diagnose_report( + split=split, + acc_score=acc_score, + stats_list=stats_list, + log_paths=paths, + ) - return acc_score, acc_per_item_scores, log_paths + def diagnose_report( + self, + split: str, + acc_score: Optional[float] = None, + stats_list: Optional[List[Dict]] = None, + log_paths: Optional[Dict[str, List[str]]] = None, + ): + import colorama + from colorama import Fore + + # Initialize colorama + colorama.init(autoreset=True) + print(Fore.CYAN + "\n================== DIAGNOSE REPORT ==================\n") + + print(Fore.GREEN + f"✔ 
Split: {split}") + + # Check the accuracy score + if acc_score is not None: + print(Fore.GREEN + f"✔ Overall accuracy score: {acc_score:.2f}") + else: + print(Fore.RED + "✘ Accuracy score not provided or calculated.") + + # List the overall stats + if stats_list is not None and len(stats_list) > 0: + print(Fore.GREEN + "✔ Overall stats:") + for idx, item in enumerate(stats_list): + print(Fore.YELLOW + f" - {idx + 1}: {item}") + + # Check for log paths + if log_paths is not None: + for key, paths in log_paths.items(): + if len(paths) > 0: + print(Fore.GREEN + f"✔ {key} paths:") + for idx, path in enumerate(paths): + print(Fore.YELLOW + f" - {key} {idx + 1}: {path}") + + else: + print(Fore.RED + "✘ No log paths available.") + + # General summary + print(Fore.GREEN + "\n✔ Diagnose report completed successfully!") + print(Fore.CYAN + "\n=====================================================\n") def debug_report( self, - text_grad_debug_path: Optional[str] = None, - few_shot_demo_debug_path: Optional[str] = None, + text_grad_debug_path: Optional[Dict[str, object]] = None, + few_shot_demo_debug_path: Optional[Dict[str, object]] = None, ): import colorama from colorama import Fore @@ -273,7 +333,7 @@ def debug_report( if text_grad_debug_path: print(Fore.GREEN + f"✔ Text grad debug path: {text_grad_debug_path}") else: - print(Fore.RED + "✘ Text grad debugging was not run.") + print(Fore.CYAN + "✘ Text grad debugging was not run.") if few_shot_demo_debug_path: print( @@ -304,9 +364,12 @@ def fit( resume_from_ckpt: Optional[ str ] = None, # TODO: have a more comprehensive ckpt loading in the future - ): + ) -> Tuple[str, TrainerResult]: r""" train_loader: An iterable or collection of iterables specifying training samples. + + Returns: + Tuple[str, TrainerResult]: Checkpoint file and the TrainerResult object """ start_time = time.time() @@ -434,7 +497,7 @@ def fit( train_loader, train_dataset, val_dataset, test_dataset ) self.debug_report(text_grad_debug_path, few_shot_demo_debug_path) - return + return self.ckpt_file, trainer_results ########Run text_optimizers and demo optimizers in sequential order ######## if ( @@ -443,7 +506,6 @@ def fit( and len(self.text_optimizers) > 0 ): if self.strategy == "random": - self._fit_text_grad_demo_mix_random( train_loader, train_dataset, @@ -465,41 +527,67 @@ def fit( raise ValueError(f"Strategy {self.strategy} not supported") else: # sequential, text first and demo second - if len(self.text_optimizers) > 0: - if self.strategy == "random": - trainer_results = self._fit_text_grad_random( - train_loader, - val_dataset, - test_dataset, - trainer_results, - starting_step=starting_step, - ) - starting_step += self.max_steps - elif self.strategy == "constrained": - trainer_results = self._fit_text_grad_constraint( + + def run_text_optimizers(starting_step: int, trainer_results: TrainerResult): + if len(self.text_optimizers) > 0: + if self.strategy == "random": + trainer_results = self._fit_text_grad_random( + train_loader, + val_dataset, + test_dataset, + trainer_results, + starting_step=starting_step, + ) + starting_step += self.max_steps + elif self.strategy == "constrained": + trainer_results = self._fit_text_grad_constraint( + train_loader, + val_dataset, + test_dataset, + trainer_results=trainer_results, + starting_step=starting_step, + ) + starting_step += self.max_steps + else: + raise ValueError(f"Strategy {self.strategy} not supported") + + def run_demo_optimizers(starting_step: int, trainer_results: TrainerResult): + if len(self.demo_optimizers) > 0: + 
self.adaltask.configure_teacher_generator() + self._fit_demos_random( train_loader, + train_dataset, val_dataset, test_dataset, trainer_results=trainer_results, starting_step=starting_step, ) - starting_step += self.max_steps - else: - raise ValueError(f"Strategy {self.strategy} not supported") - if len(self.demo_optimizers) > 0: - self.adaltask.configure_teacher_generator() # attemp to use the newest teacher as - self._fit_demos_random( - train_loader, - train_dataset, - val_dataset, - test_dataset, - trainer_results=trainer_results, - starting_step=starting_step, - ) + + if self.sequential_order == ["text", "demo"]: + run_text_optimizers(starting_step, trainer_results) + run_demo_optimizers(starting_step, trainer_results) + else: + run_demo_optimizers(starting_step, trainer_results) + run_text_optimizers(starting_step, trainer_results) + # if len(self.text_optimizers) > 0: + # run_text_optimizers(starting_step, trainer_results) + + # if len(self.demo_optimizers) > 0: + # run_demo_optimizers(starting_step, trainer_results) + # self.adaltask.configure_teacher_generator() # attemp to use the newest teacher as + # self._fit_demos_random( + # train_loader, + # train_dataset, + # val_dataset, + # test_dataset, + # trainer_results=trainer_results, + # starting_step=starting_step, + # ) end_time = time.time() print(f"Training time: {end_time - start_time}s") print(f"ckpt_file: {self.ckpt_file}") + return self.ckpt_file, trainer_results @staticmethod def _estimate_num_epochs(train_loader: Any, max_steps: int): @@ -582,7 +670,7 @@ def prep_ckpt_file_path(self, trainer_state: Dict[str, Any] = None): self.ckpt_path = os.path.join( default_root_path, "ckpt", self.adaltask.__class__.__name__ ) - print(f"Checkpoint path: {self.ckpt_path}") + logger.debug(f"Checkpoint path: {self.ckpt_path}") os.makedirs(self.ckpt_path, exist_ok=True) # list all existing checkpoints with the same file name prefix hash_key = ( @@ -627,7 +715,9 @@ def _pre_fit(self, val_dataset: Any, test_dataset: Any) -> TrainerResult: def _fit_demos_one_step_for_debug( self, train_loader, train_dataset: Any, val_dataset: Any, test_dataset: Any - ) -> str: + ) -> Dict[str, object]: + """Trace both the teacher and the student demos with scores and for sampling. 
+ For demos: we need to run both the teacher mode and the student mode.""" # get_logger(level="DEBUG") print("Fitting using Random Demo Optimizer") @@ -659,7 +749,7 @@ def _fit_demos_one_step_for_debug( f"Expected 2 traces, got {len(demo_params[0]._traces)}, traces: {demo_params[0]._traces}" ) - print(f"Teacher y_preds: {y_preds[0].to_dict()}") + # print(f"Teacher y_preds: {y_preds[0].to_dict()}") y_preds_outputs = [p.full_response for p in y_preds] @@ -676,7 +766,7 @@ def _fit_demos_one_step_for_debug( losses: List[Parameter] = self.adaltask.loss_step( batch, y_preds, 0, self.num_workers ) - print(f"Losses: {losses[0].to_dict()}") + # print(f"Losses: {losses[0].to_dict()}") self._demo_optimizers_add_scores( [sample.id for sample in batch], batch_per_item_scores, is_teacher=True ) @@ -688,17 +778,21 @@ def _fit_demos_one_step_for_debug( print(f"Graph saved to {graph_path}") - # check the score + # check the score of one param for key, val in demo_params[0]._traces.items(): - print(f"param: {key}, val: {val}") + print(f"param: {key}, {demo_params[0].name}, val: {val}") score = val.score if score is None: raise ValueError("Score is None") print(f"param: {key}, score: {score}") - print(f"Loss after backward: {losses[0].to_dict()}") + # print(f"Loss after backward: {losses[0].to_dict()}") # tracking the bootstrap so we wont repeat the same samples + # 2. run student mode + + demo_debug_result_path = None + for batch_idx, batch in enumerate(train_loader): print(f"Training step: {batch_idx}") if batch_idx > 0: @@ -717,43 +811,51 @@ def _fit_demos_one_step_for_debug( ) # for loss in losses_student: # loss.backward() + # Check the eval result y_preds_outputs = [p.full_response for p in y_preds_student] eval_result = self.adaltask.evaluate_samples(batch, y_preds_outputs) print(f"Eval result: {eval_result.avg_score}") - eval_score_per_item = eval_result.per_item_scores - - # bootstrap - batch_for_teacher = [] - losses_teacher = [] - - for i, (sample, item_score) in enumerate(zip(batch, eval_score_per_item)): - # use teacher - if sample.id in pred_teacher: - continue - # if item_score < 0.5: - batch_for_teacher.append(sample) - pred_teacher.add(sample.id) - # run teacher, use teachers's output instead of the initial output (bootstrap) - if len(batch_for_teacher) > 0: - print(f"Using teacher for {len(batch_for_teacher)} samples") - self.adaltask.use_teacher() - y_preds_teacher = self.adaltask.train_step( - batch_for_teacher, batch_idx, self.num_workers - ) - losses_teacher: List[Parameter] = self.adaltask.loss_step( # noqa F841 - batch_for_teacher, y_preds_teacher, batch_idx, self.num_workers - ) - self._demo_optimizers_add_scores( - [sample.id for sample in batch_for_teacher], - eval_score_per_item, - is_teacher=True, - ) + # eval_score_per_item = eval_result.per_item_scores + + # bootstrap a batch + # batch_for_teacher = [] + # losses_teacher = [] + + # for i, (sample, item_score) in enumerate(zip(batch, eval_score_per_item)): + + # # use teacher + # if sample.id in pred_teacher: + # continue + # # if item_score < 0.5: + # pred_teacher.add(sample.id) + # batch_for_teacher.append(sample) + # # run teacher, use teachers's output instead of the initial output (bootstrap) + # if len(batch_for_teacher) > 0: + # print(f"Using teacher for {len(batch_for_teacher)} samples") + # self.adaltask.use_teacher() + # y_preds_teacher = self.adaltask.train_step( + # batch_for_teacher, batch_idx, self.num_workers + # ) + # losses_teacher: List[Parameter] = self.adaltask.loss_step( # noqa F841 + # batch_for_teacher, 
y_preds_teacher, batch_idx, self.num_workers + # ) + # self._demo_optimizers_add_scores( + # [sample.id for sample in batch_for_teacher], + # eval_score_per_item, + # is_teacher=True, + # ) + + # loss_students backward + for loss in losses_student: + loss.backward() # propose self._demo_optimizers_propose() graph_path = os.path.join(debug_path, "student_graph") - losses_student[0].draw_graph(filepath=graph_path) + demo_debug_result_path = losses_student[0].draw_graph( + filepath=graph_path + ) # noqa F841 # test step self._demo_optimizers_step() @@ -765,11 +867,13 @@ def _fit_demos_one_step_for_debug( opt_params = [] for opt in self.demo_optimizers: opt_params.extend(opt.params) - print(f"Opt params: {opt_params}") + # print(f"Opt params: {opt_params}") for name, param in self.adaltask.named_parameters(): if param.param_type == ParameterType.DEMOS: - print(f"Demo param: {name}, value: {param.data}, param: {param}") + print( + f"Demo param: {name}, value: {param.data}, param: {param.name}" + ) if param.data is None: raise ValueError("Demo param data is None") @@ -782,10 +886,12 @@ def _fit_demos_one_step_for_debug( if len(param._demos) == 0: raise ValueError(f"No demos found, param: {param}") - return debug_path + return demo_debug_result_path - def _fit_text_grads_one_step_for_debug(self, train_loader: Any) -> str: - print("Debugging fitting one step with batch size 2 for text optimizer") + def _fit_text_grads_one_step_for_debug(self, train_loader: Any) -> Dict[str, str]: + printc( + "Debugging fitting one step with batch size 2 for text optimizer", "blue" + ) self.prep_ckpt_file_path() debug_path = os.path.join(self.ckpt_path, "debug_text_grads") @@ -796,10 +902,13 @@ def _fit_text_grads_one_step_for_debug(self, train_loader: Any) -> str: self.adaltask.train() # this will turn everything to train mode correct_loss = None failed_loss = None - print("Finding one successful and one failed loss") + all_losses = [] + printc("Finding one successful and one failed loss", "blue") for batch in train_loader: y_preds = self.adaltask.train_step(batch, 0, self.num_workers) losses = self.adaltask.loss_step(batch, y_preds, 0, self.num_workers) + # Collect all losses + all_losses.extend(losses) for loss in losses: if loss.data > 0.5: correct_loss = loss @@ -808,13 +917,27 @@ def _fit_text_grads_one_step_for_debug(self, train_loader: Any) -> str: if correct_loss is not None and failed_loss is not None: print("Found correct and failed loss") break + + # Handle case where one or both losses are None + if correct_loss is None or failed_loss is None: + if not all_losses: + raise ValueError("No losses found in the dataset.") + + # Sort all_losses by their data values + all_losses.sort(key=lambda x: x.data, reverse=True) # Highest to lowest + + # Assign first and last loss in sorted list + correct_loss = all_losses[0] + failed_loss = all_losses[-1] + print("Assigned correct_loss and failed_loss from sorted losses.") + total_loss = sum_ops([correct_loss, failed_loss]) total_loss.backward() # test optimizer self._propose_text_optimizers() - total_loss.draw_graph(filepath=debug_path) - return debug_path + debug_files = total_loss.draw_graph(filepath=debug_path, full_trace=True) + return debug_files def _set_demo_optimizers_dataset(self, train_dataset: Any): # init the dataset @@ -829,6 +952,7 @@ def _demo_optimizers_add_scores( self, ids: List[str], scores: List[float], is_teacher: bool = True ): for opt in self.demo_optimizers: + # opt = cast(DemoOptimizer, opt) opt.add_scores(ids, scores, is_teacher) def 
@@ -858,6 +982,10 @@ def _propose_text_optimizers(self):
        for text_optimizer in self.text_optimizers:
            text_optimizer.propose()

+    # def _add_failed_proposals_text_optimizers(self):
+    #     for opt in self.text_optimizers:
+    #         opt.add_failed_proposal()
+
    def _get_trainable_text_params(self):
        params = []
        for opt in self.text_optimizers:
@@ -899,7 +1027,7 @@ def _fit_text_grad_demo_mix_constrained(
    ):
        from adalflow.optim.parameter import Parameter

-        log.info("Fitting using Textual Gradient Descent")
+        logger.info("Fitting using Textual Gradient Descent")
        trainer_results = (
            self._pre_fit(val_dataset, test_dataset)
            if trainer_results is None
@@ -935,7 +1063,7 @@ def _fit_text_grad_demo_mix_constrained(
                )
                # moving batch
                all_samples.extend(batch)
-                all_losses.extend(losses)
+                all_losses.extend(losses)  # student losses
                # extract the non-parameter y_preds
                all_y_preds.extend(
                    [y.full_response for y in y_preds if isinstance(y, Parameter)]
                )
@@ -993,6 +1121,7 @@ def _fit_text_grad_demo_mix_constrained(
                    print(
                        "No proposal can improve the subset and full set, go to next step"
                    )
+                    # self._add_failed_proposals_text_optimizers()

                    self._add_one_step_in_trainer_results(
                        trainer_results,
                        val_score,
                        trainer_results.prompts[-1],
                        total_steps,
                    )
+                    continue

                # set the batch size to the size of the validation set
@@ -1065,7 +1195,7 @@ def _fit_text_grad_demo_mix_random(
        train_results: TrainerResult = None,
        starting_step: int = 0,
    ):
-        log.info("Fitting using Textual Gradient Descent")
+        logger.info("Fitting using Textual Gradient Descent")

        trainer_results = (
            self._pre_fit(val_dataset, test_dataset)
@@ -1207,7 +1337,7 @@ def _fit_demos_random(
        trainer_results: TrainerResult,
        starting_step: int,
    ):
-        log.info("Fitting using Random Demo Optimizer")
+        logger.info("Fitting using Random Demo Optimizer")
        # self.adaltask.train()
        trainer_results = (
            self._pre_fit(val_dataset, test_dataset)
@@ -1250,7 +1380,7 @@ def _fit_demos_random(
                    loss.backward_engine_disabled = (
                        True  # temporary disable the backward engine
                    )
-                    loss.backward()  # TODO: ensure no gradients in the backward, disable backward engine
+                    loss.backward()  # TODO: ensure no gradients in the backward, disable backward engine, trace the score to each class instead
                # Trace the teacher run
                self.adaltask.use_teacher(True)
                self.adaltask.train()
@@ -1397,7 +1527,7 @@ def _fit_text_grad_random(
        trainer_results: TrainerResult = None,
        starting_step: int = 0,
    ) -> TrainerResult:
-        log.info("Fitting using Textual Gradient Descent")
+        logger.info("Fitting using Textual Gradient Descent")
        trainer_results = (
            self._pre_fit(val_dataset, test_dataset)
            if trainer_results is None
@@ -1441,13 +1571,13 @@ def _fit_text_grad_random(
                    minimum_score=last_val_score,
                )
                val_score = val_output.avg_score
-                self._add_history_text_optimizers(val_score)

                if val_score > last_val_score:
+                    print(f"Optimizer step: {val_score} > {last_val_score}")
                    # self.optimizer.step()
                    self._step_text_optimizers()
-
+                    self._add_history_text_optimizers(val_score)  # track top performer
                    # test the model
                    test_output = self.adaltask.validation_step(
                        test_dataset, total_steps, self.num_workers
                    )
@@ -1461,6 +1591,9 @@ def _fit_text_grad_random(
                        total_steps,
                    )
                else:
+                    # if val_score < last_val_score:
+                    #     self._add_failed_proposals_text_optimizers()  # track failed proposals
+
                    print(f"Optimizer revert: {val_score} <= {last_val_score}")
                    # self.optimizer.revert()
                    self._revert_text_optimizers()
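Note: `_fit_text_grad_random` above follows a propose/validate/commit gate: a proposed prompt is kept (and only then recorded in the optimizer history) when it beats the last validation score, otherwise it is reverted. A minimal runnable sketch of that gate, with toy `propose`/`validate` stand-ins for the text optimizers and `validation_step`:

    import random

    def propose(prompt: str) -> str:
        return prompt + " (revised)"  # toy stand-in for a text-gradient proposal

    def validate(prompt: str) -> float:
        return random.random()  # toy stand-in for validation_step().avg_score

    prompt, last_val_score = "base prompt", 0.5
    for step in range(5):
        candidate = propose(prompt)
        val_score = validate(candidate)
        if val_score > last_val_score:
            print(f"Optimizer step: {val_score} > {last_val_score}")
            prompt, last_val_score = candidate, val_score  # commit, then track history
        else:
            print(f"Optimizer revert: {val_score} <= {last_val_score}")
            # candidate is discarded; prompt stays unchanged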
@@ -1606,6 +1739,9 @@ def _text_grad_constraint_propose_step(
        all_y_preds,
        include_demo_optimizers: bool = False,
    ):
+        """Handles both the mixed training and the separate training.
+        When include_demo_optimizers is True, the demo optimizers are included in the training.
+        """
        # compute moving batch acc
        from adalflow.optim.parameter import Parameter
@@ -1677,6 +1813,7 @@ def _text_grad_constraint_propose_step(
                print(
                    f"Fail subset check, try next proposal: {val_score} <= {subset_score}"
                )
+                # self._add_failed_proposals_text_optimizers()
                self._track_effectiveness("subset", False)
                self._revert_text_optimizers()
                if include_demo_optimizers:
@@ -1696,6 +1833,7 @@ def _text_grad_constraint_propose_step(
                    f"Fail full check, try next proposal: {new_move_batch_score} < {move_batch_score}"
                )
                self._track_effectiveness("fullset", False)
+                # self._add_failed_proposals_text_optimizers()
                self._revert_text_optimizers()
                if include_demo_optimizers:
                    self._demo_optimizers_revert()
@@ -1741,7 +1879,7 @@ def _fit_text_grad_constraint(
    ) -> TrainerResult:
        from adalflow.optim.parameter import Parameter

-        log.info("Fitting using Textual Gradient Descent with constraints")
+        logger.info("Fitting using Textual Gradient Descent with constraints")
        trainer_results = (
            self._pre_fit(val_dataset, test_dataset)
            if trainer_results is None
@@ -1813,11 +1951,13 @@ def _fit_text_grad_constraint(
                    minimum_score=last_val_score,
                )
                val_score = val_output.avg_score
-                self._add_history_text_optimizers(val_score)

                if val_score > last_val_score:
                    print(f"Optimizer step: {val_score} > {last_val_score}")
                    # self.optimizer.step()
+                    self._add_history_text_optimizers(
+                        val_score
+                    )  # track top performer
                    self._step_text_optimizers()

                    # save the score
@@ -1849,6 +1989,7 @@ def _fit_text_grad_constraint(
                else:
                    print(f"Optimizer revert: {val_score} <= {last_val_score}")
                    self._revert_text_optimizers()
+                    # self._add_failed_proposals_text_optimizers()  # track failed proposals
                    self._track_effectiveness("valset", False)
                    self._add_one_step_in_trainer_results(
                        trainer_results,
diff --git a/adalflow/poetry.lock b/adalflow/poetry.lock
index bac6b6cc3..0cd8b67f3 100644
--- a/adalflow/poetry.lock
+++ b/adalflow/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.5 and should not be changed by hand.
[[package]] name = "absl-py" @@ -24,87 +24,87 @@ files = [ [[package]] name = "aiohttp" -version = "3.11.10" +version = "3.11.11" description = "Async http client/server framework (asyncio)" optional = false python-versions = ">=3.9" files = [ - {file = "aiohttp-3.11.10-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cbad88a61fa743c5d283ad501b01c153820734118b65aee2bd7dbb735475ce0d"}, - {file = "aiohttp-3.11.10-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:80886dac673ceaef499de2f393fc80bb4481a129e6cb29e624a12e3296cc088f"}, - {file = "aiohttp-3.11.10-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:61b9bae80ed1f338c42f57c16918853dc51775fb5cb61da70d590de14d8b5fb4"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9e2e576caec5c6a6b93f41626c9c02fc87cd91538b81a3670b2e04452a63def6"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02c13415b5732fb6ee7ff64583a5e6ed1c57aa68f17d2bda79c04888dfdc2769"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfce37f31f20800a6a6620ce2cdd6737b82e42e06e6e9bd1b36f546feb3c44f"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3bbbfff4c679c64e6e23cb213f57cc2c9165c9a65d63717108a644eb5a7398df"}, - {file = "aiohttp-3.11.10-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:49c7dbbc1a559ae14fc48387a115b7d4bbc84b4a2c3b9299c31696953c2a5219"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:68386d78743e6570f054fe7949d6cb37ef2b672b4d3405ce91fafa996f7d9b4d"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:9ef405356ba989fb57f84cac66f7b0260772836191ccefbb987f414bcd2979d9"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:5d6958671b296febe7f5f859bea581a21c1d05430d1bbdcf2b393599b1cdce77"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:99b7920e7165be5a9e9a3a7f1b680f06f68ff0d0328ff4079e5163990d046767"}, - {file = "aiohttp-3.11.10-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:0dc49f42422163efb7e6f1df2636fe3db72713f6cd94688e339dbe33fe06d61d"}, - {file = "aiohttp-3.11.10-cp310-cp310-win32.whl", hash = "sha256:40d1c7a7f750b5648642586ba7206999650208dbe5afbcc5284bcec6579c9b91"}, - {file = "aiohttp-3.11.10-cp310-cp310-win_amd64.whl", hash = "sha256:68ff6f48b51bd78ea92b31079817aff539f6c8fc80b6b8d6ca347d7c02384e33"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:77c4aa15a89847b9891abf97f3d4048f3c2d667e00f8a623c89ad2dccee6771b"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:909af95a72cedbefe5596f0bdf3055740f96c1a4baa0dd11fd74ca4de0b4e3f1"}, - {file = "aiohttp-3.11.10-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:386fbe79863eb564e9f3615b959e28b222259da0c48fd1be5929ac838bc65683"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3de34936eb1a647aa919655ff8d38b618e9f6b7f250cc19a57a4bf7fd2062b6d"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c9527819b29cd2b9f52033e7fb9ff08073df49b4799c89cb5754624ecd98299"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65a96e3e03300b41f261bbfd40dfdbf1c301e87eab7cd61c054b1f2e7c89b9e8"}, - 
{file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98f5635f7b74bcd4f6f72fcd85bea2154b323a9f05226a80bc7398d0c90763b0"}, - {file = "aiohttp-3.11.10-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:03b6002e20938fc6ee0918c81d9e776bebccc84690e2b03ed132331cca065ee5"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:6362cc6c23c08d18ddbf0e8c4d5159b5df74fea1a5278ff4f2c79aed3f4e9f46"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3691ed7726fef54e928fe26344d930c0c8575bc968c3e239c2e1a04bd8cf7838"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:31d5093d3acd02b31c649d3a69bb072d539d4c7659b87caa4f6d2bcf57c2fa2b"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:8b3cf2dc0f0690a33f2d2b2cb15db87a65f1c609f53c37e226f84edb08d10f52"}, - {file = "aiohttp-3.11.10-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:fbbaea811a2bba171197b08eea288b9402faa2bab2ba0858eecdd0a4105753a3"}, - {file = "aiohttp-3.11.10-cp311-cp311-win32.whl", hash = "sha256:4b2c7ac59c5698a7a8207ba72d9e9c15b0fc484a560be0788b31312c2c5504e4"}, - {file = "aiohttp-3.11.10-cp311-cp311-win_amd64.whl", hash = "sha256:974d3a2cce5fcfa32f06b13ccc8f20c6ad9c51802bb7f829eae8a1845c4019ec"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b78f053a7ecfc35f0451d961dacdc671f4bcbc2f58241a7c820e9d82559844cf"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ab7485222db0959a87fbe8125e233b5a6f01f4400785b36e8a7878170d8c3138"}, - {file = "aiohttp-3.11.10-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cf14627232dfa8730453752e9cdc210966490992234d77ff90bc8dc0dce361d5"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:076bc454a7e6fd646bc82ea7f98296be0b1219b5e3ef8a488afbdd8e81fbac50"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:482cafb7dc886bebeb6c9ba7925e03591a62ab34298ee70d3dd47ba966370d2c"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf3d1a519a324af764a46da4115bdbd566b3c73fb793ffb97f9111dbc684fc4d"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:24213ba85a419103e641e55c27dc7ff03536c4873470c2478cce3311ba1eee7b"}, - {file = "aiohttp-3.11.10-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b99acd4730ad1b196bfb03ee0803e4adac371ae8efa7e1cbc820200fc5ded109"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:14cdb5a9570be5a04eec2ace174a48ae85833c2aadc86de68f55541f66ce42ab"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:7e97d622cb083e86f18317282084bc9fbf261801b0192c34fe4b1febd9f7ae69"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:012f176945af138abc10c4a48743327a92b4ca9adc7a0e078077cdb5dbab7be0"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44224d815853962f48fe124748227773acd9686eba6dc102578defd6fc99e8d9"}, - {file = "aiohttp-3.11.10-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c87bf31b7fdab94ae3adbe4a48e711bfc5f89d21cf4c197e75561def39e223bc"}, - {file = "aiohttp-3.11.10-cp312-cp312-win32.whl", hash = 
"sha256:06a8e2ee1cbac16fe61e51e0b0c269400e781b13bcfc33f5425912391a542985"}, - {file = "aiohttp-3.11.10-cp312-cp312-win_amd64.whl", hash = "sha256:be2b516f56ea883a3e14dda17059716593526e10fb6303189aaf5503937db408"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8cc5203b817b748adccb07f36390feb730b1bc5f56683445bfe924fc270b8816"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:5ef359ebc6949e3a34c65ce20230fae70920714367c63afd80ea0c2702902ccf"}, - {file = "aiohttp-3.11.10-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9bca390cb247dbfaec3c664326e034ef23882c3f3bfa5fbf0b56cad0320aaca5"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811f23b3351ca532af598405db1093f018edf81368e689d1b508c57dcc6b6a32"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddf5f7d877615f6a1e75971bfa5ac88609af3b74796ff3e06879e8422729fd01"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6ab29b8a0beb6f8eaf1e5049252cfe74adbaafd39ba91e10f18caeb0e99ffb34"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c49a76c1038c2dd116fa443eba26bbb8e6c37e924e2513574856de3b6516be99"}, - {file = "aiohttp-3.11.10-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7f3dc0e330575f5b134918976a645e79adf333c0a1439dcf6899a80776c9ab39"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:efb15a17a12497685304b2d976cb4939e55137df7b09fa53f1b6a023f01fcb4e"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:db1d0b28fcb7f1d35600150c3e4b490775251dea70f894bf15c678fdd84eda6a"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:15fccaf62a4889527539ecb86834084ecf6e9ea70588efde86e8bc775e0e7542"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:593c114a2221444f30749cc5e5f4012488f56bd14de2af44fe23e1e9894a9c60"}, - {file = "aiohttp-3.11.10-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7852bbcb4d0d2f0c4d583f40c3bc750ee033265d80598d0f9cb6f372baa6b836"}, - {file = "aiohttp-3.11.10-cp313-cp313-win32.whl", hash = "sha256:65e55ca7debae8faaffee0ebb4b47a51b4075f01e9b641c31e554fd376595c6c"}, - {file = "aiohttp-3.11.10-cp313-cp313-win_amd64.whl", hash = "sha256:beb39a6d60a709ae3fb3516a1581777e7e8b76933bb88c8f4420d875bb0267c6"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:0580f2e12de2138f34debcd5d88894786453a76e98febaf3e8fe5db62d01c9bf"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a55d2ad345684e7c3dd2c20d2f9572e9e1d5446d57200ff630e6ede7612e307f"}, - {file = "aiohttp-3.11.10-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04814571cb72d65a6899db6099e377ed00710bf2e3eafd2985166f2918beaf59"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e44a9a3c053b90c6f09b1bb4edd880959f5328cf63052503f892c41ea786d99f"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:502a1464ccbc800b4b1995b302efaf426e8763fadf185e933c2931df7db9a199"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:613e5169f8ae77b1933e42e418a95931fb4867b2991fc311430b15901ed67079"}, - {file = 
"aiohttp-3.11.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4cca22a61b7fe45da8fc73c3443150c3608750bbe27641fc7558ec5117b27fdf"}, - {file = "aiohttp-3.11.10-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:86a5dfcc39309470bd7b68c591d84056d195428d5d2e0b5ccadfbaf25b026ebc"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:77ae58586930ee6b2b6f696c82cf8e78c8016ec4795c53e36718365f6959dc82"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:78153314f26d5abef3239b4a9af20c229c6f3ecb97d4c1c01b22c4f87669820c"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:98283b94cc0e11c73acaf1c9698dea80c830ca476492c0fe2622bd931f34b487"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:53bf2097e05c2accc166c142a2090e4c6fd86581bde3fd9b2d3f9e93dda66ac1"}, - {file = "aiohttp-3.11.10-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c5532f0441fc09c119e1dca18fbc0687e64fbeb45aa4d6a87211ceaee50a74c4"}, - {file = "aiohttp-3.11.10-cp39-cp39-win32.whl", hash = "sha256:47ad15a65fb41c570cd0ad9a9ff8012489e68176e7207ec7b82a0940dddfd8be"}, - {file = "aiohttp-3.11.10-cp39-cp39-win_amd64.whl", hash = "sha256:c6b9e6d7e41656d78e37ce754813fa44b455c3d0d0dced2a047def7dc5570b74"}, - {file = "aiohttp-3.11.10.tar.gz", hash = "sha256:b1fc6b45010a8d0ff9e88f9f2418c6fd408c99c211257334aff41597ebece42e"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a60804bff28662cbcf340a4d61598891f12eea3a66af48ecfdc975ceec21e3c8"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b4fa1cb5f270fb3eab079536b764ad740bb749ce69a94d4ec30ceee1b5940d5"}, + {file = "aiohttp-3.11.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:731468f555656767cda219ab42e033355fe48c85fbe3ba83a349631541715ba2"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb23d8bb86282b342481cad4370ea0853a39e4a32a0042bb52ca6bdde132df43"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f047569d655f81cb70ea5be942ee5d4421b6219c3f05d131f64088c73bb0917f"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd7659baae9ccf94ae5fe8bfaa2c7bc2e94d24611528395ce88d009107e00c6d"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af01e42ad87ae24932138f154105e88da13ce7d202a6de93fafdafb2883a00ef"}, + {file = "aiohttp-3.11.11-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5854be2f3e5a729800bac57a8d76af464e160f19676ab6aea74bde18ad19d438"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6526e5fb4e14f4bbf30411216780c9967c20c5a55f2f51d3abd6de68320cc2f3"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:85992ee30a31835fc482468637b3e5bd085fa8fe9392ba0bdcbdc1ef5e9e3c55"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:88a12ad8ccf325a8a5ed80e6d7c3bdc247d66175afedbe104ee2aaca72960d8e"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:0a6d3fbf2232e3a08c41eca81ae4f1dff3d8f1a30bae415ebe0af2d2458b8a33"}, + {file = "aiohttp-3.11.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:84a585799c58b795573c7fa9b84c455adf3e1d72f19a2bf498b54a95ae0d194c"}, + {file = "aiohttp-3.11.11-cp310-cp310-win32.whl", hash = "sha256:bfde76a8f430cf5c5584553adf9926534352251d379dcb266ad2b93c54a29745"}, + {file = "aiohttp-3.11.11-cp310-cp310-win_amd64.whl", hash = "sha256:0fd82b8e9c383af11d2b26f27a478640b6b83d669440c0a71481f7c865a51da9"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ba74ec819177af1ef7f59063c6d35a214a8fde6f987f7661f4f0eecc468a8f76"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4af57160800b7a815f3fe0eba9b46bf28aafc195555f1824555fa2cfab6c1538"}, + {file = "aiohttp-3.11.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ffa336210cf9cd8ed117011085817d00abe4c08f99968deef0013ea283547204"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81b8fe282183e4a3c7a1b72f5ade1094ed1c6345a8f153506d114af5bf8accd9"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3af41686ccec6a0f2bdc66686dc0f403c41ac2089f80e2214a0f82d001052c03"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:70d1f9dde0e5dd9e292a6d4d00058737052b01f3532f69c0c65818dac26dc287"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:249cc6912405917344192b9f9ea5cd5b139d49e0d2f5c7f70bdfaf6b4dbf3a2e"}, + {file = "aiohttp-3.11.11-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0eb98d90b6690827dcc84c246811feeb4e1eea683c0eac6caed7549be9c84665"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ec82bf1fda6cecce7f7b915f9196601a1bd1a3079796b76d16ae4cce6d0ef89b"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:9fd46ce0845cfe28f108888b3ab17abff84ff695e01e73657eec3f96d72eef34"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:bd176afcf8f5d2aed50c3647d4925d0db0579d96f75a31e77cbaf67d8a87742d"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:ec2aa89305006fba9ffb98970db6c8221541be7bee4c1d027421d6f6df7d1ce2"}, + {file = "aiohttp-3.11.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:92cde43018a2e17d48bb09c79e4d4cb0e236de5063ce897a5e40ac7cb4878773"}, + {file = "aiohttp-3.11.11-cp311-cp311-win32.whl", hash = "sha256:aba807f9569455cba566882c8938f1a549f205ee43c27b126e5450dc9f83cc62"}, + {file = "aiohttp-3.11.11-cp311-cp311-win_amd64.whl", hash = "sha256:ae545f31489548c87b0cced5755cfe5a5308d00407000e72c4fa30b19c3220ac"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e595c591a48bbc295ebf47cb91aebf9bd32f3ff76749ecf282ea7f9f6bb73886"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3ea1b59dc06396b0b424740a10a0a63974c725b1c64736ff788a3689d36c02d2"}, + {file = "aiohttp-3.11.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8811f3f098a78ffa16e0ea36dffd577eb031aea797cbdba81be039a4169e242c"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7227b87a355ce1f4bf83bfae4399b1f5bb42e0259cb9405824bd03d2f4336a"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d40f9da8cabbf295d3a9dae1295c69975b86d941bc20f0a087f0477fa0a66231"}, + {file = 
"aiohttp-3.11.11-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ffb3dc385f6bb1568aa974fe65da84723210e5d9707e360e9ecb51f59406cd2e"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a8f5f7515f3552d899c61202d99dcb17d6e3b0de777900405611cd747cecd1b8"}, + {file = "aiohttp-3.11.11-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3499c7ffbfd9c6a3d8d6a2b01c26639da7e43d47c7b4f788016226b1e711caa8"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8e2bf8029dbf0810c7bfbc3e594b51c4cc9101fbffb583a3923aea184724203c"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b6212a60e5c482ef90f2d788835387070a88d52cf6241d3916733c9176d39eab"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d119fafe7b634dbfa25a8c597718e69a930e4847f0b88e172744be24515140da"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:6fba278063559acc730abf49845d0e9a9e1ba74f85f0ee6efd5803f08b285853"}, + {file = "aiohttp-3.11.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:92fc484e34b733704ad77210c7957679c5c3877bd1e6b6d74b185e9320cc716e"}, + {file = "aiohttp-3.11.11-cp312-cp312-win32.whl", hash = "sha256:9f5b3c1ed63c8fa937a920b6c1bec78b74ee09593b3f5b979ab2ae5ef60d7600"}, + {file = "aiohttp-3.11.11-cp312-cp312-win_amd64.whl", hash = "sha256:1e69966ea6ef0c14ee53ef7a3d68b564cc408121ea56c0caa2dc918c1b2f553d"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:541d823548ab69d13d23730a06f97460f4238ad2e5ed966aaf850d7c369782d9"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:929f3ed33743a49ab127c58c3e0a827de0664bfcda566108989a14068f820194"}, + {file = "aiohttp-3.11.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0882c2820fd0132240edbb4a51eb8ceb6eef8181db9ad5291ab3332e0d71df5f"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b63de12e44935d5aca7ed7ed98a255a11e5cb47f83a9fded7a5e41c40277d104"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa54f8ef31d23c506910c21163f22b124facb573bff73930735cf9fe38bf7dff"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a344d5dc18074e3872777b62f5f7d584ae4344cd6006c17ba12103759d407af3"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b7fb429ab1aafa1f48578eb315ca45bd46e9c37de11fe45c7f5f4138091e2f1"}, + {file = "aiohttp-3.11.11-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c341c7d868750e31961d6d8e60ff040fb9d3d3a46d77fd85e1ab8e76c3e9a5c4"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ed9ee95614a71e87f1a70bc81603f6c6760128b140bc4030abe6abaa988f1c3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:de8d38f1c2810fa2a4f1d995a2e9c70bb8737b18da04ac2afbf3971f65781d87"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:a9b7371665d4f00deb8f32208c7c5e652059b0fda41cf6dbcac6114a041f1cc2"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:620598717fce1b3bd14dd09947ea53e1ad510317c85dda2c9c65b622edc96b12"}, + {file = "aiohttp-3.11.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:bf8d9bfee991d8acc72d060d53860f356e07a50f0e0d09a8dfedea1c554dd0d5"}, + {file = "aiohttp-3.11.11-cp313-cp313-win32.whl", hash = "sha256:9d73ee3725b7a737ad86c2eac5c57a4a97793d9f442599bea5ec67ac9f4bdc3d"}, + {file = "aiohttp-3.11.11-cp313-cp313-win_amd64.whl", hash = "sha256:c7a06301c2fb096bdb0bd25fe2011531c1453b9f2c163c8031600ec73af1cc99"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:3e23419d832d969f659c208557de4a123e30a10d26e1e14b73431d3c13444c2e"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:21fef42317cf02e05d3b09c028712e1d73a9606f02467fd803f7c1f39cc59add"}, + {file = "aiohttp-3.11.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1f21bb8d0235fc10c09ce1d11ffbd40fc50d3f08a89e4cf3a0c503dc2562247a"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1642eceeaa5ab6c9b6dfeaaa626ae314d808188ab23ae196a34c9d97efb68350"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2170816e34e10f2fd120f603e951630f8a112e1be3b60963a1f159f5699059a6"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8be8508d110d93061197fd2d6a74f7401f73b6d12f8822bbcd6d74f2b55d71b1"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eed954b161e6b9b65f6be446ed448ed3921763cc432053ceb606f89d793927e"}, + {file = "aiohttp-3.11.11-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6c9af134da4bc9b3bd3e6a70072509f295d10ee60c697826225b60b9959acdd"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44167fc6a763d534a6908bdb2592269b4bf30a03239bcb1654781adf5e49caf1"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:479b8c6ebd12aedfe64563b85920525d05d394b85f166b7873c8bde6da612f9c"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:10b4ff0ad793d98605958089fabfa350e8e62bd5d40aa65cdc69d6785859f94e"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:b540bd67cfb54e6f0865ceccd9979687210d7ed1a1cc8c01f8e67e2f1e883d28"}, + {file = "aiohttp-3.11.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:1dac54e8ce2ed83b1f6b1a54005c87dfed139cf3f777fdc8afc76e7841101226"}, + {file = "aiohttp-3.11.11-cp39-cp39-win32.whl", hash = "sha256:568c1236b2fde93b7720f95a890741854c1200fba4a3471ff48b2934d2d93fd3"}, + {file = "aiohttp-3.11.11-cp39-cp39-win_amd64.whl", hash = "sha256:943a8b052e54dfd6439fd7989f67fc6a7f2138d0a2cf0a7de5f18aa4fe7eb3b1"}, + {file = "aiohttp-3.11.11.tar.gz", hash = "sha256:bb49c7f1e6ebf3821a42d81d494f538107610c3a705987f53068546b0e90303e"}, ] [package.dependencies] @@ -122,13 +122,13 @@ speedups = ["Brotli", "aiodns (>=3.2.0)", "brotlicffi"] [[package]] name = "aiosignal" -version = "1.3.1" +version = "1.3.2" description = "aiosignal: a list of registered asynchronous callbacks" optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" files = [ - {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"}, - {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"}, + {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, + {file = "aiosignal-1.3.2.tar.gz", hash = 
"sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, ] [package.dependencies] @@ -172,13 +172,13 @@ vertex = ["google-auth (>=2,<3)"] [[package]] name = "anyio" -version = "4.7.0" +version = "4.8.0" description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.9" files = [ - {file = "anyio-4.7.0-py3-none-any.whl", hash = "sha256:ea60c3723ab42ba6fff7e8ccb0488c898ec538ff4df1f1d5e642c3601d07e352"}, - {file = "anyio-4.7.0.tar.gz", hash = "sha256:2f834749c602966b7d456a7567cafcb309f96482b5081d14ac93ccd457f9dd48"}, + {file = "anyio-4.8.0-py3-none-any.whl", hash = "sha256:b5011f270ab5eb0abf13385f851315585cc37ef330dd88e27ec3d34d651fd47a"}, + {file = "anyio-4.8.0.tar.gz", hash = "sha256:1d9fe889df5212298c0c0723fa20479d1b94883a2df44bd3897aa91083316f7a"}, ] [package.dependencies] @@ -189,9 +189,24 @@ typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx_rtd_theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21)"] trio = ["trio (>=0.26.1)"] +[[package]] +name = "asttokens" +version = "3.0.0" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = ">=3.8" +files = [ + {file = "asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2"}, + {file = "asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7"}, +] + +[package.extras] +astroid = ["astroid (>=2,<4)"] +test = ["astroid (>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] + [[package]] name = "async-timeout" version = "5.0.1" @@ -205,19 +220,19 @@ files = [ [[package]] name = "attrs" -version = "24.2.0" +version = "24.3.0" description = "Classes Without Boilerplate" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "attrs-24.2.0-py3-none-any.whl", hash = "sha256:81921eb96de3191c8258c199618104dd27ac608d9366f5e35d011eae1867ede2"}, - {file = "attrs-24.2.0.tar.gz", hash = "sha256:5cfb1b9148b5b086569baec03f20d7b6bf3bcacc9a42bebf87ffaaca362f6346"}, + {file = "attrs-24.3.0-py3-none-any.whl", hash = "sha256:ac96cd038792094f438ad1f6ff80837353805ac950cd2aa0e0625ef19850c308"}, + {file = "attrs-24.3.0.tar.gz", hash = "sha256:8f5c07333d543103541ba7be0e2ce16eeee8130cb0b3f9238ab904ce1e85baff"}, ] [package.extras] benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", 
"sphinxcontrib-towncrier", "towncrier (<24.7)"] tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] @@ -272,32 +287,32 @@ files = [ [[package]] name = "boto3" -version = "1.35.77" +version = "1.36.0" description = "The AWS SDK for Python" optional = true python-versions = ">=3.8" files = [ - {file = "boto3-1.35.77-py3-none-any.whl", hash = "sha256:a09871805f8e462349a1c33c23eb413668df0bf68424e61d53518e1a7d883b2f"}, - {file = "boto3-1.35.77.tar.gz", hash = "sha256:cc819cdbccbc2d0dc185f1dcfe74cf3809489c4cae63c2e5d6a557aa0c5ab928"}, + {file = "boto3-1.36.0-py3-none-any.whl", hash = "sha256:d0ca7a58ce25701a52232cc8df9d87854824f1f2964b929305722ebc7959d5a9"}, + {file = "boto3-1.36.0.tar.gz", hash = "sha256:159898f51c2997a12541c0e02d6e5a8fe2993ddb307b9478fd9a339f98b57e00"}, ] [package.dependencies] -botocore = ">=1.35.77,<1.36.0" +botocore = ">=1.36.0,<1.37.0" jmespath = ">=0.7.1,<2.0.0" -s3transfer = ">=0.10.0,<0.11.0" +s3transfer = ">=0.11.0,<0.12.0" [package.extras] crt = ["botocore[crt] (>=1.21.0,<2.0a0)"] [[package]] name = "botocore" -version = "1.35.77" +version = "1.36.0" description = "Low-level, data-driven core of boto 3." -optional = false +optional = true python-versions = ">=3.8" files = [ - {file = "botocore-1.35.77-py3-none-any.whl", hash = "sha256:3faa27d65841499762228902d7e215fa99a4c2fdc76c9113e1c3f339bdf685b8"}, - {file = "botocore-1.35.77.tar.gz", hash = "sha256:17b778016644e9342ca3ff2f430c1d1db0c6126e9b41a57cff52ac58e7a455e0"}, + {file = "botocore-1.36.0-py3-none-any.whl", hash = "sha256:b54b11f0cfc47fc1243ada0f7f461266c279968487616720fa8ebb02183917d7"}, + {file = "botocore-1.36.0.tar.gz", hash = "sha256:0232029ff9ae3f5b50cdb25cbd257c16f87402b6d31a05bd6483638ee6434c4b"}, ] [package.dependencies] @@ -309,7 +324,7 @@ urllib3 = [ ] [package.extras] -crt = ["awscrt (==0.22.0)"] +crt = ["awscrt (==0.23.4)"] [[package]] name = "cachetools" @@ -324,13 +339,13 @@ files = [ [[package]] name = "certifi" -version = "2024.8.30" +version = "2024.12.14" description = "Python package for providing Mozilla's CA Bundle." optional = false python-versions = ">=3.6" files = [ - {file = "certifi-2024.8.30-py3-none-any.whl", hash = "sha256:922820b53db7a7257ffbda3f597266d435245903d80737e34f8a45ff3e3230d8"}, - {file = "certifi-2024.8.30.tar.gz", hash = "sha256:bec941d2aa8195e248a60b31ff9f0558284cf01a52591ceda73ea9afffd69fd9"}, + {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, + {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, ] [[package]] @@ -425,127 +440,114 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.4.0" +version = "3.4.1" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.7" files = [ - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, - {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, - {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, - {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = "sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, - {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, - {file = 
"charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, - {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, - {file = 
"charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, - {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, - {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, - {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, - {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, + {file = 
"charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, + {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = 
"sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, + {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, + {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, + {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, + {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, + {file 
= "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, + {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, + {file = 
"charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"}, + {file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"}, + {file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"}, + {file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"}, ] [[package]] name = "click" -version = "8.1.7" +version = "8.1.8" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.7" files = [ - {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"}, - {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"}, + {file = "click-8.1.8-py3-none-any.whl", hash = "sha256:63c132bbbed01578a06712a2d1f497bb62d9c1c0d329b7903a866228027263b2"}, + {file = "click-8.1.8.tar.gz", hash = "sha256:ed53c9d8990d83c2a27deae68e4ee337473f6330c040a31d4225c9574d16096a"}, ] [package.dependencies] @@ -553,20 +555,19 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [[package]] name = "cohere" -version = "5.13.3" +version = "5.13.8" description = "" optional = true python-versions = "<4.0,>=3.9" files = [ - {file = "cohere-5.13.3-py3-none-any.whl", hash = "sha256:076c88fdd3d670b6577eb8e813a9072bf18b59648d4092c6f0263af3c27bf81f"}, - {file = "cohere-5.13.3.tar.gz", hash = "sha256:70d87e0d5ce48aaee5ba70ead5efbade226cb2a4b11bfcfb676f6a2db3642819"}, + {file = "cohere-5.13.8-py3-none-any.whl", hash = "sha256:94ada584bdd2c3213b243668c6c2d9a93f19bfcef13bf5b190ff9fab265a4229"}, + {file = "cohere-5.13.8.tar.gz", hash = "sha256:027e101323fb5c2fe0a7fda28b7b087a6dfa85c4d7063c419ff65d055ec83037"}, ] [package.dependencies] fastavro = ">=1.9.4,<2.0.0" httpx = ">=0.21.2" httpx-sse = "0.4.0" -numpy = ">=1.26,<2.0" parameterized = ">=0.9.0,<0.10.0" pydantic = ">=1.9.2" pydantic-core = ">=2.18.2,<3.0.0" @@ -575,9 +576,6 @@ tokenizers = ">=0.15,<1" types-requests = ">=2.0.0,<3.0.0" typing_extensions = ">=4.0.0" -[package.extras] -aws = ["boto3 (>=1.34.0,<2.0.0)", "sagemaker (>=2.232.1,<3.0.0)"] - [[package]] name = "colorama" version = "0.4.6" @@ -868,6 +866,20 @@ files = [ [package.extras] test = ["pytest (>=6)"] +[[package]] +name = "executing" +version = "2.1.0" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = ">=3.8" +files = [ + {file = "executing-2.1.0-py2.py3-none-any.whl", hash = "sha256:8d63781349375b5ebccc3142f4b30350c0cd9c79f921cde38be2be4637e98eaf"}, + {file = "executing-2.1.0.tar.gz", hash = "sha256:8ea27ddd260da8150fa5a708269c4a10e76161e2496ec3e587da9e3c0fe4b9ab"}, +] + +[package.extras] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] + [[package]] name = 
"faiss-cpu" version = "1.9.0.post1" @@ -909,42 +921,42 @@ packaging = "*" [[package]] name = "fastavro" -version = "1.9.7" +version = "1.10.0" description = "Fast read/write of AVRO files" optional = true -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "fastavro-1.9.7-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cc811fb4f7b5ae95f969cda910241ceacf82e53014c7c7224df6f6e0ca97f52f"}, - {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb8749e419a85f251bf1ac87d463311874972554d25d4a0b19f6bdc56036d7cf"}, - {file = "fastavro-1.9.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b2f9bafa167cb4d1c3dd17565cb5bf3d8c0759e42620280d1760f1e778e07fc"}, - {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:e87d04b235b29f7774d226b120da2ca4e60b9e6fdf6747daef7f13f218b3517a"}, - {file = "fastavro-1.9.7-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:b525c363e267ed11810aaad8fbdbd1c3bd8837d05f7360977d72a65ab8c6e1fa"}, - {file = "fastavro-1.9.7-cp310-cp310-win_amd64.whl", hash = "sha256:6312fa99deecc319820216b5e1b1bd2d7ebb7d6f221373c74acfddaee64e8e60"}, - {file = "fastavro-1.9.7-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ec8499dc276c2d2ef0a68c0f1ad11782b2b956a921790a36bf4c18df2b8d4020"}, - {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76d9d96f98052615ab465c63ba8b76ed59baf2e3341b7b169058db104cbe2aa0"}, - {file = "fastavro-1.9.7-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919f3549e07a8a8645a2146f23905955c35264ac809f6c2ac18142bc5b9b6022"}, - {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9de1fa832a4d9016724cd6facab8034dc90d820b71a5d57c7e9830ffe90f31e4"}, - {file = "fastavro-1.9.7-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1d09227d1f48f13281bd5ceac958650805aef9a4ef4f95810128c1f9be1df736"}, - {file = "fastavro-1.9.7-cp311-cp311-win_amd64.whl", hash = "sha256:2db993ae6cdc63e25eadf9f93c9e8036f9b097a3e61d19dca42536dcc5c4d8b3"}, - {file = "fastavro-1.9.7-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:4e1289b731214a7315884c74b2ec058b6e84380ce9b18b8af5d387e64b18fc44"}, - {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eac69666270a76a3a1d0444f39752061195e79e146271a568777048ffbd91a27"}, - {file = "fastavro-1.9.7-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9be089be8c00f68e343bbc64ca6d9a13e5e5b0ba8aa52bcb231a762484fb270e"}, - {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d576eccfd60a18ffa028259500df67d338b93562c6700e10ef68bbd88e499731"}, - {file = "fastavro-1.9.7-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ee9bf23c157bd7dcc91ea2c700fa3bd924d9ec198bb428ff0b47fa37fe160659"}, - {file = "fastavro-1.9.7-cp312-cp312-win_amd64.whl", hash = "sha256:b6b2ccdc78f6afc18c52e403ee68c00478da12142815c1bd8a00973138a166d0"}, - {file = "fastavro-1.9.7-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7313def3aea3dacface0a8b83f6d66e49a311149aa925c89184a06c1ef99785d"}, - {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:536f5644737ad21d18af97d909dba099b9e7118c237be7e4bd087c7abde7e4f0"}, - {file = "fastavro-1.9.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2af559f30383b79cf7d020a6b644c42ffaed3595f775fe8f3d7f80b1c43dfdc5"}, - {file = 
"fastavro-1.9.7-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:edc28ab305e3c424de5ac5eb87b48d1e07eddb6aa08ef5948fcda33cc4d995ce"}, - {file = "fastavro-1.9.7-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:ec2e96bdabd58427fe683329b3d79f42c7b4f4ff6b3644664a345a655ac2c0a1"}, - {file = "fastavro-1.9.7-cp38-cp38-win_amd64.whl", hash = "sha256:3b683693c8a85ede496ebebe115be5d7870c150986e34a0442a20d88d7771224"}, - {file = "fastavro-1.9.7-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:58f76a5c9a312fbd37b84e49d08eb23094d36e10d43bc5df5187bc04af463feb"}, - {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56304401d2f4f69f5b498bdd1552c13ef9a644d522d5de0dc1d789cf82f47f73"}, - {file = "fastavro-1.9.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fcce036c6aa06269fc6a0428050fcb6255189997f5e1a728fc461e8b9d3e26b"}, - {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:17de68aae8c2525f5631d80f2b447a53395cdc49134f51b0329a5497277fc2d2"}, - {file = "fastavro-1.9.7-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7c911366c625d0a997eafe0aa83ffbc6fd00d8fd4543cb39a97c6f3b8120ea87"}, - {file = "fastavro-1.9.7-cp39-cp39-win_amd64.whl", hash = "sha256:912283ed48578a103f523817fdf0c19b1755cea9b4a6387b73c79ecb8f8f84fc"}, - {file = "fastavro-1.9.7.tar.gz", hash = "sha256:13e11c6cb28626da85290933027cd419ce3f9ab8e45410ef24ce6b89d20a1f6c"}, + {file = "fastavro-1.10.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1a9fe0672d2caf0fe54e3be659b13de3cad25a267f2073d6f4b9f8862acc31eb"}, + {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:86dd0410770e0c99363788f0584523709d85e57bb457372ec5c285a482c17fe6"}, + {file = "fastavro-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:190e80dc7d77d03a6a8597a026146b32a0bbe45e3487ab4904dc8c1bebecb26d"}, + {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bf570d63be9155c3fdc415f60a49c171548334b70fff0679a184b69c29b6bc61"}, + {file = "fastavro-1.10.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e07abb6798e95dccecaec316265e35a018b523d1f3944ad396d0a93cb95e0a08"}, + {file = "fastavro-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:37203097ed11d0b8fd3c004904748777d730cafd26e278167ea602eebdef8eb2"}, + {file = "fastavro-1.10.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d183c075f527ab695a27ae75f210d4a86bce660cda2f85ae84d5606efc15ef50"}, + {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d7a95a2c0639bffd7c079b59e9a796bfc3a9acd78acff7088f7c54ade24e4a77"}, + {file = "fastavro-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a678153b5da1b024a32ec3f611b2e7afd24deac588cb51dd1b0019935191a6d"}, + {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:67a597a5cfea4dddcf8b49eaf8c2b5ffee7fda15b578849185bc690ec0cd0d8f"}, + {file = "fastavro-1.10.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1fd689724760b17f69565d8a4e7785ed79becd451d1c99263c40cb2d6491f1d4"}, + {file = "fastavro-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:4f949d463f9ac4221128a51e4e34e2562f401e5925adcadfd28637a73df6c2d8"}, + {file = "fastavro-1.10.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:cfe57cb0d72f304bd0dcc5a3208ca6a7363a9ae76f3073307d095c9d053b29d4"}, + {file = 
"fastavro-1.10.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74e517440c824cb65fb29d3e3903a9406f4d7c75490cef47e55c4c82cdc66270"}, + {file = "fastavro-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:203c17d44cadde76e8eecb30f2d1b4f33eb478877552d71f049265dc6f2ecd10"}, + {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6575be7f2b5f94023b5a4e766b0251924945ad55e9a96672dc523656d17fe251"}, + {file = "fastavro-1.10.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fe471deb675ed2f01ee2aac958fbf8ebb13ea00fa4ce7f87e57710a0bc592208"}, + {file = "fastavro-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:567ff515f2a5d26d9674b31c95477f3e6022ec206124c62169bc2ffaf0889089"}, + {file = "fastavro-1.10.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:82263af0adfddb39c85f9517d736e1e940fe506dfcc35bc9ab9f85e0fa9236d8"}, + {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:566c193109ff0ff84f1072a165b7106c4f96050078a4e6ac7391f81ca1ef3efa"}, + {file = "fastavro-1.10.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e400d2e55d068404d9fea7c5021f8b999c6f9d9afa1d1f3652ec92c105ffcbdd"}, + {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:9b8227497f71565270f9249fc9af32a93644ca683a0167cfe66d203845c3a038"}, + {file = "fastavro-1.10.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8e62d04c65461b30ac6d314e4197ad666371e97ae8cb2c16f971d802f6c7f514"}, + {file = "fastavro-1.10.0-cp313-cp313-win_amd64.whl", hash = "sha256:86baf8c9740ab570d0d4d18517da71626fe9be4d1142bea684db52bd5adb078f"}, + {file = "fastavro-1.10.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5bccbb6f8e9e5b834cca964f0e6ebc27ebe65319d3940b0b397751a470f45612"}, + {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b0132f6b0b53f61a0a508a577f64beb5de1a5e068a9b4c0e1df6e3b66568eec4"}, + {file = "fastavro-1.10.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ca37a363b711202c6071a6d4787e68e15fa3ab108261058c4aae853c582339af"}, + {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:cf38cecdd67ca9bd92e6e9ba34a30db6343e7a3bedf171753ee78f8bd9f8a670"}, + {file = "fastavro-1.10.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:f4dd10e0ed42982122d20cdf1a88aa50ee09e5a9cd9b39abdffb1aa4f5b76435"}, + {file = "fastavro-1.10.0-cp39-cp39-win_amd64.whl", hash = "sha256:aaef147dc14dd2d7823246178fd06fc5e477460e070dc6d9e07dd8193a6bc93c"}, + {file = "fastavro-1.10.0.tar.gz", hash = "sha256:47bf41ac6d52cdfe4a3da88c75a802321321b37b663a900d12765101a5d6886f"}, ] [package.extras] @@ -971,61 +983,61 @@ typing = ["typing-extensions (>=4.12.2)"] [[package]] name = "fonttools" -version = "4.55.2" +version = "4.55.3" description = "Tools to manipulate font files" optional = false python-versions = ">=3.8" files = [ - {file = "fonttools-4.55.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:bef0f8603834643b1a6419d57902f18e7d950ec1a998fb70410635c598dc1a1e"}, - {file = "fonttools-4.55.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:944228b86d472612d3b48bcc83b31c25c2271e63fdc74539adfcfa7a96d487fb"}, - {file = "fonttools-4.55.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f0e55f5da594b85f269cfbecd2f6bd3e07d0abba68870bc3f34854de4fa4678"}, - {file = 
"fonttools-4.55.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b1a6e576db0c83c1b91925bf1363478c4bb968dbe8433147332fb5782ce6190"}, - {file = "fonttools-4.55.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:616368b15716781bc84df5c2191dc0540137aaef56c2771eb4b89b90933f347a"}, - {file = "fonttools-4.55.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7bbae4f3915225c2c37670da68e2bf18a21206060ad31dfb95fec91ef641caa7"}, - {file = "fonttools-4.55.2-cp310-cp310-win32.whl", hash = "sha256:8b02b10648d69d67a7eb055f4d3eedf4a85deb22fb7a19fbd9acbae7c7538199"}, - {file = "fonttools-4.55.2-cp310-cp310-win_amd64.whl", hash = "sha256:bbea0ab841113ac8e8edde067e099b7288ffc6ac2dded538b131c2c0595d5f77"}, - {file = "fonttools-4.55.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:d34525e8141286fa976e14806639d32294bfb38d28bbdb5f6be9f46a1cd695a6"}, - {file = "fonttools-4.55.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ecd1c2b1c2ec46bb73685bc5473c72e16ed0930ef79bc2919ccadc43a99fb16"}, - {file = "fonttools-4.55.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9008438ad59e5a8e403a62fbefef2b2ff377eb3857d90a3f2a5f4d674ff441b2"}, - {file = "fonttools-4.55.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:131591ac8d7a47043aaf29581aba755ae151d46e49d2bf49608601efd71e8b4d"}, - {file = "fonttools-4.55.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:4c83381c3e3e3d9caa25527c4300543578341f21aae89e4fbbb4debdda8d82a2"}, - {file = "fonttools-4.55.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:42aca564b575252fd9954ed0d91d97a24de24289a16ce8ff74ed0bdf5ecebf11"}, - {file = "fonttools-4.55.2-cp311-cp311-win32.whl", hash = "sha256:c6457f650ebe15baa17fc06e256227f0a47f46f80f27ec5a0b00160de8dc2c13"}, - {file = "fonttools-4.55.2-cp311-cp311-win_amd64.whl", hash = "sha256:5cfa67414d7414442a5635ff634384101c54f53bb7b0e04aa6a61b013fcce194"}, - {file = "fonttools-4.55.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:18f082445b8fe5e91c53e6184f4c1c73f3f965c8bcc614c6cd6effd573ce6c1a"}, - {file = "fonttools-4.55.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:27c0f91adbbd706e8acd1db73e3e510118e62d0ffb651864567dccc5b2339f90"}, - {file = "fonttools-4.55.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d8ccce035320d63dba0c35f52499322f5531dbe85bba1514c7cea26297e4c54"}, - {file = "fonttools-4.55.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96e126df9615df214ec7f04bebcf60076297fbc10b75c777ce58b702d7708ffb"}, - {file = "fonttools-4.55.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:508ebb42956a7a931c4092dfa2d9b4ffd4f94cea09b8211199090d2bd082506b"}, - {file = "fonttools-4.55.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:c1b9de46ef7b683d50400abf9f1578eaceee271ff51c36bf4b7366f2be29f498"}, - {file = "fonttools-4.55.2-cp312-cp312-win32.whl", hash = "sha256:2df61d9fc15199cc86dad29f64dd686874a3a52dda0c2d8597d21f509f95c332"}, - {file = "fonttools-4.55.2-cp312-cp312-win_amd64.whl", hash = "sha256:d337ec087da8216a828574aa0525d869df0a2ac217a2efc1890974ddd1fbc5b9"}, - {file = "fonttools-4.55.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:10aff204e2edee1d312fa595c06f201adf8d528a3b659cfb34cd47eceaaa6a26"}, - {file = "fonttools-4.55.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:09fe922a3eff181fd07dd724cdb441fb6b9fc355fd1c0f1aa79aca60faf1fbdd"}, - {file = 
"fonttools-4.55.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:487e1e8b524143a799bda0169c48b44a23a6027c1bb1957d5a172a7d3a1dd704"}, - {file = "fonttools-4.55.2-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9b1726872e09268bbedb14dc02e58b7ea31ecdd1204c6073eda4911746b44797"}, - {file = "fonttools-4.55.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6fc88cfb58b0cd7b48718c3e61dd0d0a3ee8e2c86b973342967ce09fbf1db6d4"}, - {file = "fonttools-4.55.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e857fe1859901ad8c5cab32e0eebc920adb09f413d2d73b74b677cf47b28590c"}, - {file = "fonttools-4.55.2-cp313-cp313-win32.whl", hash = "sha256:81ccd2b3a420b8050c7d9db3be0555d71662973b3ef2a1d921a2880b58957db8"}, - {file = "fonttools-4.55.2-cp313-cp313-win_amd64.whl", hash = "sha256:d559eb1744c7dcfa90ae60cb1a4b3595e898e48f4198738c321468c01180cd83"}, - {file = "fonttools-4.55.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6b5917ef79cac8300b88fd6113003fd01bbbbea2ea060a27b95d8f77cb4c65c2"}, - {file = "fonttools-4.55.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:663eba5615d6abaaf616432354eb7ce951d518e43404371bcc2b0694ef21e8d6"}, - {file = "fonttools-4.55.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:803d5cef5fc47f44f5084d154aa3d6f069bb1b60e32390c225f897fa19b0f939"}, - {file = "fonttools-4.55.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8bc5f100de0173cc39102c0399bd6c3bd544bbdf224957933f10ee442d43cddd"}, - {file = "fonttools-4.55.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:3d9bbc1e380fdaf04ad9eabd8e3e6a4301eaf3487940893e9fd98537ea2e283b"}, - {file = "fonttools-4.55.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:42a9afedff07b6f75aa0f39b5e49922ac764580ef3efce035ca30284b2ee65c8"}, - {file = "fonttools-4.55.2-cp38-cp38-win32.whl", hash = "sha256:f1c76f423f1a241df08f87614364dff6e0b7ce23c962c1b74bd995ec7c0dad13"}, - {file = "fonttools-4.55.2-cp38-cp38-win_amd64.whl", hash = "sha256:25062b6ca03464dd5179fc2040fb19e03391b7cc49b9cc4f879312e638605c5c"}, - {file = "fonttools-4.55.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d1100d8e665fe386a79cab59446992de881ea74d0d6c191bb988642692aa2421"}, - {file = "fonttools-4.55.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dbdc251c5e472e5ae6bc816f9b82718b8e93ff7992e7331d6cf3562b96aa268e"}, - {file = "fonttools-4.55.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0bf24d2b02dbc9376d795a63062632ff73e3e9e60c0229373f500aed7e86dd7"}, - {file = "fonttools-4.55.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d4ff250ed4ff05015dfd9cf2adf7570c7a383ca80f4d9732ac484a5ed0d8453c"}, - {file = "fonttools-4.55.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:44cf2a98aa661dbdeb8c03f5e405b074e2935196780bb729888639f5276067d9"}, - {file = "fonttools-4.55.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:22ef222740eb89d189bf0612eb98fbae592c61d7efeac51bfbc2a1592d469557"}, - {file = "fonttools-4.55.2-cp39-cp39-win32.whl", hash = "sha256:93f439ca27e55f585e7aaa04a74990acd983b5f2245e41d6b79f0a8b44e684d8"}, - {file = "fonttools-4.55.2-cp39-cp39-win_amd64.whl", hash = "sha256:627cf10d6f5af5bec6324c18a2670f134c29e1b7dce3fb62e8ef88baa6cba7a9"}, - {file = "fonttools-4.55.2-py3-none-any.whl", hash = "sha256:8e2d89fbe9b08d96e22c7a81ec04a4e8d8439c31223e2dc6f2f9fc8ff14bdf9f"}, - {file = "fonttools-4.55.2.tar.gz", hash = 
"sha256:45947e7b3f9673f91df125d375eb57b9a23f2a603f438a1aebf3171bffa7a205"}, + {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:1dcc07934a2165ccdc3a5a608db56fb3c24b609658a5b340aee4ecf3ba679dc0"}, + {file = "fonttools-4.55.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f7d66c15ba875432a2d2fb419523f5d3d347f91f48f57b8b08a2dfc3c39b8a3f"}, + {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e4ae3592e62eba83cd2c4ccd9462dcfa603ff78e09110680a5444c6925d841"}, + {file = "fonttools-4.55.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62d65a3022c35e404d19ca14f291c89cc5890032ff04f6c17af0bd1927299674"}, + {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d342e88764fb201286d185093781bf6628bbe380a913c24adf772d901baa8276"}, + {file = "fonttools-4.55.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:dd68c87a2bfe37c5b33bcda0fba39b65a353876d3b9006fde3adae31f97b3ef5"}, + {file = "fonttools-4.55.3-cp310-cp310-win32.whl", hash = "sha256:1bc7ad24ff98846282eef1cbeac05d013c2154f977a79886bb943015d2b1b261"}, + {file = "fonttools-4.55.3-cp310-cp310-win_amd64.whl", hash = "sha256:b54baf65c52952db65df39fcd4820668d0ef4766c0ccdf32879b77f7c804d5c5"}, + {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8c4491699bad88efe95772543cd49870cf756b019ad56294f6498982408ab03e"}, + {file = "fonttools-4.55.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:5323a22eabddf4b24f66d26894f1229261021dacd9d29e89f7872dd8c63f0b8b"}, + {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5480673f599ad410695ca2ddef2dfefe9df779a9a5cda89503881e503c9c7d90"}, + {file = "fonttools-4.55.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da9da6d65cd7aa6b0f806556f4985bcbf603bf0c5c590e61b43aa3e5a0f822d0"}, + {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:e894b5bd60d9f473bed7a8f506515549cc194de08064d829464088d23097331b"}, + {file = "fonttools-4.55.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:aee3b57643827e237ff6ec6d28d9ff9766bd8b21e08cd13bff479e13d4b14765"}, + {file = "fonttools-4.55.3-cp311-cp311-win32.whl", hash = "sha256:eb6ca911c4c17eb51853143624d8dc87cdcdf12a711fc38bf5bd21521e79715f"}, + {file = "fonttools-4.55.3-cp311-cp311-win_amd64.whl", hash = "sha256:6314bf82c54c53c71805318fcf6786d986461622dd926d92a465199ff54b1b72"}, + {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:f9e736f60f4911061235603a6119e72053073a12c6d7904011df2d8fad2c0e35"}, + {file = "fonttools-4.55.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:7a8aa2c5e5b8b3bcb2e4538d929f6589a5c6bdb84fd16e2ed92649fb5454f11c"}, + {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07f8288aacf0a38d174445fc78377a97fb0b83cfe352a90c9d9c1400571963c7"}, + {file = "fonttools-4.55.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8d5e8916c0970fbc0f6f1bece0063363bb5857a7f170121a4493e31c3db3314"}, + {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ae3b6600565b2d80b7c05acb8e24d2b26ac407b27a3f2e078229721ba5698427"}, + {file = "fonttools-4.55.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:54153c49913f45065c8d9e6d0c101396725c5621c8aee744719300f79771d75a"}, + {file = 
"fonttools-4.55.3-cp312-cp312-win32.whl", hash = "sha256:827e95fdbbd3e51f8b459af5ea10ecb4e30af50221ca103bea68218e9615de07"}, + {file = "fonttools-4.55.3-cp312-cp312-win_amd64.whl", hash = "sha256:e6e8766eeeb2de759e862004aa11a9ea3d6f6d5ec710551a88b476192b64fd54"}, + {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:a430178ad3e650e695167cb53242dae3477b35c95bef6525b074d87493c4bf29"}, + {file = "fonttools-4.55.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:529cef2ce91dc44f8e407cc567fae6e49a1786f2fefefa73a294704c415322a4"}, + {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e75f12c82127486fac2d8bfbf5bf058202f54bf4f158d367e41647b972342ca"}, + {file = "fonttools-4.55.3-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:859c358ebf41db18fb72342d3080bce67c02b39e86b9fbcf1610cca14984841b"}, + {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:546565028e244a701f73df6d8dd6be489d01617863ec0c6a42fa25bf45d43048"}, + {file = "fonttools-4.55.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:aca318b77f23523309eec4475d1fbbb00a6b133eb766a8bdc401faba91261abe"}, + {file = "fonttools-4.55.3-cp313-cp313-win32.whl", hash = "sha256:8c5ec45428edaa7022f1c949a632a6f298edc7b481312fc7dc258921e9399628"}, + {file = "fonttools-4.55.3-cp313-cp313-win_amd64.whl", hash = "sha256:11e5de1ee0d95af4ae23c1a138b184b7f06e0b6abacabf1d0db41c90b03d834b"}, + {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:caf8230f3e10f8f5d7593eb6d252a37caf58c480b19a17e250a63dad63834cf3"}, + {file = "fonttools-4.55.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b586ab5b15b6097f2fb71cafa3c98edfd0dba1ad8027229e7b1e204a58b0e09d"}, + {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8c2794ded89399cc2169c4d0bf7941247b8d5932b2659e09834adfbb01589aa"}, + {file = "fonttools-4.55.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf4fe7c124aa3f4e4c1940880156e13f2f4d98170d35c749e6b4f119a872551e"}, + {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:86721fbc389ef5cc1e2f477019e5069e8e4421e8d9576e9c26f840dbb04678de"}, + {file = "fonttools-4.55.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:89bdc5d88bdeec1b15af790810e267e8332d92561dce4f0748c2b95c9bdf3926"}, + {file = "fonttools-4.55.3-cp38-cp38-win32.whl", hash = "sha256:bc5dbb4685e51235ef487e4bd501ddfc49be5aede5e40f4cefcccabc6e60fb4b"}, + {file = "fonttools-4.55.3-cp38-cp38-win_amd64.whl", hash = "sha256:cd70de1a52a8ee2d1877b6293af8a2484ac82514f10b1c67c1c5762d38073e56"}, + {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:bdcc9f04b36c6c20978d3f060e5323a43f6222accc4e7fcbef3f428e216d96af"}, + {file = "fonttools-4.55.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c3ca99e0d460eff46e033cd3992a969658c3169ffcd533e0a39c63a38beb6831"}, + {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22f38464daa6cdb7b6aebd14ab06609328fe1e9705bb0fcc7d1e69de7109ee02"}, + {file = "fonttools-4.55.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ed63959d00b61959b035c7d47f9313c2c1ece090ff63afea702fe86de00dbed4"}, + {file = "fonttools-4.55.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5e8d657cd7326eeaba27de2740e847c6b39dde2f8d7cd7cc56f6aad404ddf0bd"}, + {file = 
"fonttools-4.55.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:fb594b5a99943042c702c550d5494bdd7577f6ef19b0bc73877c948a63184a32"}, + {file = "fonttools-4.55.3-cp39-cp39-win32.whl", hash = "sha256:dc5294a3d5c84226e3dbba1b6f61d7ad813a8c0238fceea4e09aa04848c3d851"}, + {file = "fonttools-4.55.3-cp39-cp39-win_amd64.whl", hash = "sha256:aedbeb1db64496d098e6be92b2e63b5fac4e53b1b92032dfc6988e1ea9134a4d"}, + {file = "fonttools-4.55.3-py3-none-any.whl", hash = "sha256:f412604ccbeee81b091b420272841e5ec5ef68967a9790e80bffd0e30b8e2977"}, + {file = "fonttools-4.55.3.tar.gz", hash = "sha256:3983313c2a04d6cc1fe9251f8fc647754cf49a61dac6cb1e7249ae67afaafc45"}, ] [package.extras] @@ -1239,13 +1251,13 @@ grpcio-gcp = ["grpcio-gcp (>=0.2.2,<1.0.dev0)"] [[package]] name = "google-api-python-client" -version = "2.154.0" +version = "2.159.0" description = "Google API Client Library for Python" optional = false python-versions = ">=3.7" files = [ - {file = "google_api_python_client-2.154.0-py2.py3-none-any.whl", hash = "sha256:a521bbbb2ec0ba9d6f307cdd64ed6e21eeac372d1bd7493a4ab5022941f784ad"}, - {file = "google_api_python_client-2.154.0.tar.gz", hash = "sha256:1b420062e03bfcaa1c79e2e00a612d29a6a934151ceb3d272fe150a656dc8f17"}, + {file = "google_api_python_client-2.159.0-py2.py3-none-any.whl", hash = "sha256:baef0bb631a60a0bd7c0bf12a5499e3a40cd4388484de7ee55c1950bf820a0cf"}, + {file = "google_api_python_client-2.159.0.tar.gz", hash = "sha256:55197f430f25c907394b44fa078545ffef89d33fd4dca501b7db9f0d8e224bd6"}, ] [package.dependencies] @@ -1257,13 +1269,13 @@ uritemplate = ">=3.0.1,<5" [[package]] name = "google-auth" -version = "2.36.0" +version = "2.37.0" description = "Google Authentication Library" optional = false python-versions = ">=3.7" files = [ - {file = "google_auth-2.36.0-py2.py3-none-any.whl", hash = "sha256:51a15d47028b66fd36e5c64a82d2d57480075bccc7da37cde257fc94177a61fb"}, - {file = "google_auth-2.36.0.tar.gz", hash = "sha256:545e9618f2df0bcbb7dcbc45a546485b1212624716975a1ea5ae8149ce769ab1"}, + {file = "google_auth-2.37.0-py2.py3-none-any.whl", hash = "sha256:42664f18290a6be591be5329a96fe30184be1a1badb7292a7f686a9659de9ca0"}, + {file = "google_auth-2.37.0.tar.gz", hash = "sha256:0054623abf1f9c83492c63d3f47e77f0a544caa3d40b2d98e099a611c2dd5d00"}, ] [package.dependencies] @@ -1274,6 +1286,7 @@ rsa = ">=3.1.4,<5" [package.extras] aiohttp = ["aiohttp (>=3.6.2,<4.0.0.dev0)", "requests (>=2.20.0,<3.0.0.dev0)"] enterprise-cert = ["cryptography", "pyopenssl"] +pyjwt = ["cryptography (>=38.0.3)", "pyjwt (>=2.0)"] pyopenssl = ["cryptography (>=38.0.3)", "pyopenssl (>=20.0.0)"] reauth = ["pyu2f (>=0.1.5)"] requests = ["requests (>=2.20.0,<3.0.0.dev0)"] @@ -1456,70 +1469,70 @@ typing-extensions = ">=4.7,<5" [[package]] name = "grpcio" -version = "1.68.1" +version = "1.69.0" description = "HTTP/2-based RPC framework" optional = false python-versions = ">=3.8" files = [ - {file = "grpcio-1.68.1-cp310-cp310-linux_armv7l.whl", hash = "sha256:d35740e3f45f60f3c37b1e6f2f4702c23867b9ce21c6410254c9c682237da68d"}, - {file = "grpcio-1.68.1-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:d99abcd61760ebb34bdff37e5a3ba333c5cc09feda8c1ad42547bea0416ada78"}, - {file = "grpcio-1.68.1-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:f8261fa2a5f679abeb2a0a93ad056d765cdca1c47745eda3f2d87f874ff4b8c9"}, - {file = "grpcio-1.68.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0feb02205a27caca128627bd1df4ee7212db051019a9afa76f4bb6a1a80ca95e"}, - {file = 
"grpcio-1.68.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:919d7f18f63bcad3a0f81146188e90274fde800a94e35d42ffe9eadf6a9a6330"}, - {file = "grpcio-1.68.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:963cc8d7d79b12c56008aabd8b457f400952dbea8997dd185f155e2f228db079"}, - {file = "grpcio-1.68.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ccf2ebd2de2d6661e2520dae293298a3803a98ebfc099275f113ce1f6c2a80f1"}, - {file = "grpcio-1.68.1-cp310-cp310-win32.whl", hash = "sha256:2cc1fd04af8399971bcd4f43bd98c22d01029ea2e56e69c34daf2bf8470e47f5"}, - {file = "grpcio-1.68.1-cp310-cp310-win_amd64.whl", hash = "sha256:ee2e743e51cb964b4975de572aa8fb95b633f496f9fcb5e257893df3be854746"}, - {file = "grpcio-1.68.1-cp311-cp311-linux_armv7l.whl", hash = "sha256:55857c71641064f01ff0541a1776bfe04a59db5558e82897d35a7793e525774c"}, - {file = "grpcio-1.68.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4b177f5547f1b995826ef529d2eef89cca2f830dd8b2c99ffd5fde4da734ba73"}, - {file = "grpcio-1.68.1-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:3522c77d7e6606d6665ec8d50e867f13f946a4e00c7df46768f1c85089eae515"}, - {file = "grpcio-1.68.1-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9d1fae6bbf0816415b81db1e82fb3bf56f7857273c84dcbe68cbe046e58e1ccd"}, - {file = "grpcio-1.68.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:298ee7f80e26f9483f0b6f94cc0a046caf54400a11b644713bb5b3d8eb387600"}, - {file = "grpcio-1.68.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:cbb5780e2e740b6b4f2d208e90453591036ff80c02cc605fea1af8e6fc6b1bbe"}, - {file = "grpcio-1.68.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ddda1aa22495d8acd9dfbafff2866438d12faec4d024ebc2e656784d96328ad0"}, - {file = "grpcio-1.68.1-cp311-cp311-win32.whl", hash = "sha256:b33bd114fa5a83f03ec6b7b262ef9f5cac549d4126f1dc702078767b10c46ed9"}, - {file = "grpcio-1.68.1-cp311-cp311-win_amd64.whl", hash = "sha256:7f20ebec257af55694d8f993e162ddf0d36bd82d4e57f74b31c67b3c6d63d8b2"}, - {file = "grpcio-1.68.1-cp312-cp312-linux_armv7l.whl", hash = "sha256:8829924fffb25386995a31998ccbbeaa7367223e647e0122043dfc485a87c666"}, - {file = "grpcio-1.68.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:3aed6544e4d523cd6b3119b0916cef3d15ef2da51e088211e4d1eb91a6c7f4f1"}, - {file = "grpcio-1.68.1-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:4efac5481c696d5cb124ff1c119a78bddbfdd13fc499e3bc0ca81e95fc573684"}, - {file = "grpcio-1.68.1-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ab2d912ca39c51f46baf2a0d92aa265aa96b2443266fc50d234fa88bf877d8e"}, - {file = "grpcio-1.68.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95c87ce2a97434dffe7327a4071839ab8e8bffd0054cc74cbe971fba98aedd60"}, - {file = "grpcio-1.68.1-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:e4842e4872ae4ae0f5497bf60a0498fa778c192cc7a9e87877abd2814aca9475"}, - {file = "grpcio-1.68.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:255b1635b0ed81e9f91da4fcc8d43b7ea5520090b9a9ad9340d147066d1d3613"}, - {file = "grpcio-1.68.1-cp312-cp312-win32.whl", hash = "sha256:7dfc914cc31c906297b30463dde0b9be48e36939575eaf2a0a22a8096e69afe5"}, - {file = "grpcio-1.68.1-cp312-cp312-win_amd64.whl", hash = "sha256:a0c8ddabef9c8f41617f213e527254c41e8b96ea9d387c632af878d05db9229c"}, - {file = "grpcio-1.68.1-cp313-cp313-linux_armv7l.whl", hash = "sha256:a47faedc9ea2e7a3b6569795c040aae5895a19dde0c728a48d3c5d7995fda385"}, - {file = 
"grpcio-1.68.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:390eee4225a661c5cd133c09f5da1ee3c84498dc265fd292a6912b65c421c78c"}, - {file = "grpcio-1.68.1-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:66a24f3d45c33550703f0abb8b656515b0ab777970fa275693a2f6dc8e35f1c1"}, - {file = "grpcio-1.68.1-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c08079b4934b0bf0a8847f42c197b1d12cba6495a3d43febd7e99ecd1cdc8d54"}, - {file = "grpcio-1.68.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8720c25cd9ac25dd04ee02b69256d0ce35bf8a0f29e20577427355272230965a"}, - {file = "grpcio-1.68.1-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:04cfd68bf4f38f5bb959ee2361a7546916bd9a50f78617a346b3aeb2b42e2161"}, - {file = "grpcio-1.68.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:c28848761a6520c5c6071d2904a18d339a796ebe6b800adc8b3f474c5ce3c3ad"}, - {file = "grpcio-1.68.1-cp313-cp313-win32.whl", hash = "sha256:77d65165fc35cff6e954e7fd4229e05ec76102d4406d4576528d3a3635fc6172"}, - {file = "grpcio-1.68.1-cp313-cp313-win_amd64.whl", hash = "sha256:a8040f85dcb9830d8bbb033ae66d272614cec6faceee88d37a88a9bd1a7a704e"}, - {file = "grpcio-1.68.1-cp38-cp38-linux_armv7l.whl", hash = "sha256:eeb38ff04ab6e5756a2aef6ad8d94e89bb4a51ef96e20f45c44ba190fa0bcaad"}, - {file = "grpcio-1.68.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8a3869a6661ec8f81d93f4597da50336718bde9eb13267a699ac7e0a1d6d0bea"}, - {file = "grpcio-1.68.1-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:2c4cec6177bf325eb6faa6bd834d2ff6aa8bb3b29012cceb4937b86f8b74323c"}, - {file = "grpcio-1.68.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12941d533f3cd45d46f202e3667be8ebf6bcb3573629c7ec12c3e211d99cfccf"}, - {file = "grpcio-1.68.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80af6f1e69c5e68a2be529990684abdd31ed6622e988bf18850075c81bb1ad6e"}, - {file = "grpcio-1.68.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:e8dbe3e00771bfe3d04feed8210fc6617006d06d9a2679b74605b9fed3e8362c"}, - {file = "grpcio-1.68.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:83bbf5807dc3ee94ce1de2dfe8a356e1d74101e4b9d7aa8c720cc4818a34aded"}, - {file = "grpcio-1.68.1-cp38-cp38-win32.whl", hash = "sha256:8cb620037a2fd9eeee97b4531880e439ebfcd6d7d78f2e7dcc3726428ab5ef63"}, - {file = "grpcio-1.68.1-cp38-cp38-win_amd64.whl", hash = "sha256:52fbf85aa71263380d330f4fce9f013c0798242e31ede05fcee7fbe40ccfc20d"}, - {file = "grpcio-1.68.1-cp39-cp39-linux_armv7l.whl", hash = "sha256:cb400138e73969eb5e0535d1d06cae6a6f7a15f2cc74add320e2130b8179211a"}, - {file = "grpcio-1.68.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a1b988b40f2fd9de5c820f3a701a43339d8dcf2cb2f1ca137e2c02671cc83ac1"}, - {file = "grpcio-1.68.1-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:96f473cdacfdd506008a5d7579c9f6a7ff245a9ade92c3c0265eb76cc591914f"}, - {file = "grpcio-1.68.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:37ea3be171f3cf3e7b7e412a98b77685eba9d4fd67421f4a34686a63a65d99f9"}, - {file = "grpcio-1.68.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ceb56c4285754e33bb3c2fa777d055e96e6932351a3082ce3559be47f8024f0"}, - {file = "grpcio-1.68.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:dffd29a2961f3263a16d73945b57cd44a8fd0b235740cb14056f0612329b345e"}, - {file = "grpcio-1.68.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:025f790c056815b3bf53da850dd70ebb849fd755a4b1ac822cb65cd631e37d43"}, - {file = 
"grpcio-1.68.1-cp39-cp39-win32.whl", hash = "sha256:1098f03dedc3b9810810568060dea4ac0822b4062f537b0f53aa015269be0a76"}, - {file = "grpcio-1.68.1-cp39-cp39-win_amd64.whl", hash = "sha256:334ab917792904245a028f10e803fcd5b6f36a7b2173a820c0b5b076555825e1"}, - {file = "grpcio-1.68.1.tar.gz", hash = "sha256:44a8502dd5de653ae6a73e2de50a401d84184f0331d0ac3daeb044e66d5c5054"}, + {file = "grpcio-1.69.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:2060ca95a8db295ae828d0fc1c7f38fb26ccd5edf9aa51a0f44251f5da332e97"}, + {file = "grpcio-1.69.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:2e52e107261fd8fa8fa457fe44bfadb904ae869d87c1280bf60f93ecd3e79278"}, + {file = "grpcio-1.69.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:316463c0832d5fcdb5e35ff2826d9aa3f26758d29cdfb59a368c1d6c39615a11"}, + {file = "grpcio-1.69.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:26c9a9c4ac917efab4704b18eed9082ed3b6ad19595f047e8173b5182fec0d5e"}, + {file = "grpcio-1.69.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90b3646ced2eae3a0599658eeccc5ba7f303bf51b82514c50715bdd2b109e5ec"}, + {file = "grpcio-1.69.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:3b75aea7c6cb91b341c85e7c1d9db1e09e1dd630b0717f836be94971e015031e"}, + {file = "grpcio-1.69.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5cfd14175f9db33d4b74d63de87c64bb0ee29ce475ce3c00c01ad2a3dc2a9e51"}, + {file = "grpcio-1.69.0-cp310-cp310-win32.whl", hash = "sha256:9031069d36cb949205293cf0e243abd5e64d6c93e01b078c37921493a41b72dc"}, + {file = "grpcio-1.69.0-cp310-cp310-win_amd64.whl", hash = "sha256:cc89b6c29f3dccbe12d7a3b3f1b3999db4882ae076c1c1f6df231d55dbd767a5"}, + {file = "grpcio-1.69.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:8de1b192c29b8ce45ee26a700044717bcbbd21c697fa1124d440548964328561"}, + {file = "grpcio-1.69.0-cp311-cp311-macosx_10_14_universal2.whl", hash = "sha256:7e76accf38808f5c5c752b0ab3fd919eb14ff8fafb8db520ad1cc12afff74de6"}, + {file = "grpcio-1.69.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:d5658c3c2660417d82db51e168b277e0ff036d0b0f859fa7576c0ffd2aec1442"}, + {file = "grpcio-1.69.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5494d0e52bf77a2f7eb17c6da662886ca0a731e56c1c85b93505bece8dc6cf4c"}, + {file = "grpcio-1.69.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4ed866f9edb574fd9be71bf64c954ce1b88fc93b2a4cbf94af221e9426eb14d6"}, + {file = "grpcio-1.69.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c5ba38aeac7a2fe353615c6b4213d1fbb3a3c34f86b4aaa8be08baaaee8cc56d"}, + {file = "grpcio-1.69.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f79e05f5bbf551c4057c227d1b041ace0e78462ac8128e2ad39ec58a382536d2"}, + {file = "grpcio-1.69.0-cp311-cp311-win32.whl", hash = "sha256:bf1f8be0da3fcdb2c1e9f374f3c2d043d606d69f425cd685110dd6d0d2d61258"}, + {file = "grpcio-1.69.0-cp311-cp311-win_amd64.whl", hash = "sha256:fb9302afc3a0e4ba0b225cd651ef8e478bf0070cf11a529175caecd5ea2474e7"}, + {file = "grpcio-1.69.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:fc18a4de8c33491ad6f70022af5c460b39611e39578a4d84de0fe92f12d5d47b"}, + {file = "grpcio-1.69.0-cp312-cp312-macosx_10_14_universal2.whl", hash = "sha256:0f0270bd9ffbff6961fe1da487bdcd594407ad390cc7960e738725d4807b18c4"}, + {file = "grpcio-1.69.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:dc48f99cc05e0698e689b51a05933253c69a8c8559a47f605cff83801b03af0e"}, + {file = "grpcio-1.69.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", 
hash = "sha256:1e925954b18d41aeb5ae250262116d0970893b38232689c4240024e4333ac084"}, + {file = "grpcio-1.69.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87d222569273720366f68a99cb62e6194681eb763ee1d3b1005840678d4884f9"}, + {file = "grpcio-1.69.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:b62b0f41e6e01a3e5082000b612064c87c93a49b05f7602fe1b7aa9fd5171a1d"}, + {file = "grpcio-1.69.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:db6f9fd2578dbe37db4b2994c94a1d9c93552ed77dca80e1657bb8a05b898b55"}, + {file = "grpcio-1.69.0-cp312-cp312-win32.whl", hash = "sha256:b192b81076073ed46f4b4dd612b8897d9a1e39d4eabd822e5da7b38497ed77e1"}, + {file = "grpcio-1.69.0-cp312-cp312-win_amd64.whl", hash = "sha256:1227ff7836f7b3a4ab04e5754f1d001fa52a730685d3dc894ed8bc262cc96c01"}, + {file = "grpcio-1.69.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:a78a06911d4081a24a1761d16215a08e9b6d4d29cdbb7e427e6c7e17b06bcc5d"}, + {file = "grpcio-1.69.0-cp313-cp313-macosx_10_14_universal2.whl", hash = "sha256:dc5a351927d605b2721cbb46158e431dd49ce66ffbacb03e709dc07a491dde35"}, + {file = "grpcio-1.69.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:3629d8a8185f5139869a6a17865d03113a260e311e78fbe313f1a71603617589"}, + {file = "grpcio-1.69.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9a281878feeb9ae26db0622a19add03922a028d4db684658f16d546601a4870"}, + {file = "grpcio-1.69.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cc614e895177ab7e4b70f154d1a7c97e152577ea101d76026d132b7aaba003b"}, + {file = "grpcio-1.69.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:1ee76cd7e2e49cf9264f6812d8c9ac1b85dda0eaea063af07292400f9191750e"}, + {file = "grpcio-1.69.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:0470fa911c503af59ec8bc4c82b371ee4303ececbbdc055f55ce48e38b20fd67"}, + {file = "grpcio-1.69.0-cp313-cp313-win32.whl", hash = "sha256:b650f34aceac8b2d08a4c8d7dc3e8a593f4d9e26d86751ebf74ebf5107d927de"}, + {file = "grpcio-1.69.0-cp313-cp313-win_amd64.whl", hash = "sha256:028337786f11fecb5d7b7fa660475a06aabf7e5e52b5ac2df47414878c0ce7ea"}, + {file = "grpcio-1.69.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:b7f693db593d6bf285e015d5538bf1c86cf9c60ed30b6f7da04a00ed052fe2f3"}, + {file = "grpcio-1.69.0-cp38-cp38-macosx_10_14_universal2.whl", hash = "sha256:8b94e83f66dbf6fd642415faca0608590bc5e8d30e2c012b31d7d1b91b1de2fd"}, + {file = "grpcio-1.69.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:b634851b92c090763dde61df0868c730376cdb73a91bcc821af56ae043b09596"}, + {file = "grpcio-1.69.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bf5f680d3ed08c15330d7830d06bc65f58ca40c9999309517fd62880d70cb06e"}, + {file = "grpcio-1.69.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:200e48a6e7b00f804cf00a1c26292a5baa96507c7749e70a3ec10ca1a288936e"}, + {file = "grpcio-1.69.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:45a4704339b6e5b24b0e136dea9ad3815a94f30eb4f1e1d44c4ac484ef11d8dd"}, + {file = "grpcio-1.69.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:85d347cb8237751b23539981dbd2d9d8f6e9ff90082b427b13022b948eb6347a"}, + {file = "grpcio-1.69.0-cp38-cp38-win32.whl", hash = "sha256:60e5de105dc02832dc8f120056306d0ef80932bcf1c0e2b4ca3b676de6dc6505"}, + {file = "grpcio-1.69.0-cp38-cp38-win_amd64.whl", hash = "sha256:282f47d0928e40f25d007f24eb8fa051cb22551e3c74b8248bc9f9bea9c35fe0"}, + {file = "grpcio-1.69.0-cp39-cp39-linux_armv7l.whl", hash = 
"sha256:dd034d68a2905464c49479b0c209c773737a4245d616234c79c975c7c90eca03"}, + {file = "grpcio-1.69.0-cp39-cp39-macosx_10_14_universal2.whl", hash = "sha256:01f834732c22a130bdf3dc154d1053bdbc887eb3ccb7f3e6285cfbfc33d9d5cc"}, + {file = "grpcio-1.69.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:a7f4ed0dcf202a70fe661329f8874bc3775c14bb3911d020d07c82c766ce0eb1"}, + {file = "grpcio-1.69.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd7ea241b10bc5f0bb0f82c0d7896822b7ed122b3ab35c9851b440c1ccf81588"}, + {file = "grpcio-1.69.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f03dc9b4da4c0dc8a1db7a5420f575251d7319b7a839004d8916257ddbe4816"}, + {file = "grpcio-1.69.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ca71d73a270dff052fe4edf74fef142d6ddd1f84175d9ac4a14b7280572ac519"}, + {file = "grpcio-1.69.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5ccbed100dc43704e94ccff9e07680b540d64e4cc89213ab2832b51b4f68a520"}, + {file = "grpcio-1.69.0-cp39-cp39-win32.whl", hash = "sha256:1514341def9c6ec4b7f0b9628be95f620f9d4b99331b7ef0a1845fd33d9b579c"}, + {file = "grpcio-1.69.0-cp39-cp39-win_amd64.whl", hash = "sha256:c1fea55d26d647346acb0069b08dca70984101f2dc95066e003019207212e303"}, + {file = "grpcio-1.69.0.tar.gz", hash = "sha256:936fa44241b5379c5afc344e1260d467bee495747eaf478de825bab2791da6f5"}, ] [package.extras] -protobuf = ["grpcio-tools (>=1.68.1)"] +protobuf = ["grpcio-tools (>=1.69.0)"] [[package]] name = "grpcio-status" @@ -1621,13 +1634,13 @@ files = [ [[package]] name = "huggingface-hub" -version = "0.26.5" +version = "0.27.1" description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub" optional = false python-versions = ">=3.8.0" files = [ - {file = "huggingface_hub-0.26.5-py3-none-any.whl", hash = "sha256:fb7386090bbe892072e64b85f7c4479fd2d65eea5f2543327c970d5169e83924"}, - {file = "huggingface_hub-0.26.5.tar.gz", hash = "sha256:1008bd18f60bfb65e8dbc0a97249beeeaa8c99d3c2fa649354df9fa5a13ed83b"}, + {file = "huggingface_hub-0.27.1-py3-none-any.whl", hash = "sha256:1c5155ca7d60b60c2e2fc38cbb3ffb7f7c3adf48f824015b219af9061771daec"}, + {file = "huggingface_hub-0.27.1.tar.gz", hash = "sha256:c004463ca870283909d715d20f066ebd6968c2207dae9393fdffb3c1d4d8f98b"}, ] [package.dependencies] @@ -1655,13 +1668,13 @@ typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "t [[package]] name = "identify" -version = "2.6.3" +version = "2.6.5" description = "File identification library for Python" optional = false python-versions = ">=3.9" files = [ - {file = "identify-2.6.3-py2.py3-none-any.whl", hash = "sha256:9edba65473324c2ea9684b1f944fe3191db3345e50b6d04571d10ed164f8d7bd"}, - {file = "identify-2.6.3.tar.gz", hash = "sha256:62f5dae9b5fef52c84cc188514e9ea4f3f636b1d8799ab5ebc475471f9e47a02"}, + {file = "identify-2.6.5-py2.py3-none-any.whl", hash = "sha256:14181a47091eb75b337af4c23078c9d09225cd4c48929f521f3bf16b09d02566"}, + {file = "identify-2.6.5.tar.gz", hash = "sha256:c10b33f250e5bba374fae86fb57f3adcebf1161bce7cdf92031915fd480c13bc"}, ] [package.extras] @@ -1706,13 +1719,13 @@ type = ["pytest-mypy"] [[package]] name = "importlib-resources" -version = "6.4.5" +version = "6.5.2" description = "Read resources from Python packages" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "importlib_resources-6.4.5-py3-none-any.whl", hash = "sha256:ac29d5f956f01d5e4bb63102a5a19957f1b9175e45649977264a1416783bb717"}, - {file = 
"importlib_resources-6.4.5.tar.gz", hash = "sha256:980862a1d16c9e147a59603677fa2aa5fd82b87f223b6cb870695bcfce830065"}, + {file = "importlib_resources-6.5.2-py3-none-any.whl", hash = "sha256:789cfdc3ed28c78b67a06acb8126751ced69a3d5f79c095a98298cd8a760ccec"}, + {file = "importlib_resources-6.5.2.tar.gz", hash = "sha256:185f87adef5bcc288449d98fb4fba07cea78bc036455dd44c5fc4a2fe78fed2c"}, ] [package.dependencies] @@ -1737,15 +1750,71 @@ files = [ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, ] +[[package]] +name = "ipython" +version = "8.18.1" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.9" +files = [ + {file = "ipython-8.18.1-py3-none-any.whl", hash = "sha256:e8267419d72d81955ec1177f8a29aaa90ac80ad647499201119e2f05e99aa397"}, + {file = "ipython-8.18.1.tar.gz", hash = "sha256:ca6f079bb33457c66e233e4580ebfc4128855b4cf6370dddd73842a9563e8a27"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +prompt-toolkit = ">=3.0.41,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" +typing-extensions = {version = "*", markers = "python_version < \"3.10\""} + +[package.extras] +all = ["black", "curio", "docrepr", "exceptiongroup", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +black = ["black"] +doc = ["docrepr", "exceptiongroup", "ipykernel", "matplotlib", "pickleshare", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio (<0.22)", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.22)", "pandas", "pickleshare", "pytest (<7.1)", "pytest-asyncio (<0.22)", "testpath", "trio"] + +[[package]] +name = "jedi" +version = "0.19.2" +description = "An autocompletion tool for Python that can be used for text editors." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, + {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, +] + +[package.dependencies] +parso = ">=0.8.4,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] + [[package]] name = "jinja2" -version = "3.1.4" +version = "3.1.5" description = "A very fast and expressive template engine." optional = false python-versions = ">=3.7" files = [ - {file = "jinja2-3.1.4-py3-none-any.whl", hash = "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d"}, - {file = "jinja2-3.1.4.tar.gz", hash = "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369"}, + {file = "jinja2-3.1.5-py3-none-any.whl", hash = "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb"}, + {file = "jinja2-3.1.5.tar.gz", hash = "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb"}, ] [package.dependencies] @@ -1843,7 +1912,7 @@ files = [ name = "jmespath" version = "1.0.1" description = "JSON Matching Expressions" -optional = false +optional = true python-versions = ">=3.7" files = [ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"}, @@ -1864,6 +1933,24 @@ files = [ [package.dependencies] attrs = ">=19.2.0" +[[package]] +name = "jsonpickle" +version = "4.0.1" +description = "jsonpickle encodes/decodes any Python object to/from JSON" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jsonpickle-4.0.1-py3-none-any.whl", hash = "sha256:2973c0b0d988c6792ed6c446fa582c48352e79c2880fa2c013f1abde15905555"}, + {file = "jsonpickle-4.0.1.tar.gz", hash = "sha256:b5336144d902958b92cb08bc1e76bfa47199b8afd454303693894defd2fa50c5"}, +] + +[package.extras] +cov = ["pytest-cov"] +dev = ["black", "pyupgrade"] +docs = ["furo", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +packaging = ["build", "setuptools (>=61.2)", "setuptools-scm[toml] (>=6.0)", "twine"] +testing = ["PyYAML", "atheris (>=2.3.0,<2.4.0)", "bson", "ecdsa", "feedparser", "gmpy2", "numpy", "pandas", "pymongo", "pytest (>=6.0,!=8.1.*)", "pytest-benchmark", "pytest-benchmark[histogram]", "pytest-checkdocs (>=1.2.3)", "pytest-enabler (>=1.0.1)", "pytest-ruff (>=0.2.1)", "scikit-learn", "scipy", "scipy (>=1.9.3)", "simplejson", "sqlalchemy", "ujson"] + [[package]] name = "kiwisolver" version = "1.4.7" @@ -2110,52 +2197,52 @@ files = [ [[package]] name = "matplotlib" -version = "3.9.3" +version = "3.9.4" description = "Python plotting package" optional = false python-versions = ">=3.9" files = [ - {file = "matplotlib-3.9.3-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:41b016e3be4e740b66c79a031a0a6e145728dbc248142e751e8dab4f3188ca1d"}, - {file = "matplotlib-3.9.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8e0143975fc2a6d7136c97e19c637321288371e8f09cff2564ecd73e865ea0b9"}, - {file = "matplotlib-3.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f459c8ee2c086455744723628264e43c884be0c7d7b45d84b8cd981310b4815"}, - {file = "matplotlib-3.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:687df7ceff57b8f070d02b4db66f75566370e7ae182a0782b6d3d21b0d6917dc"}, - {file = "matplotlib-3.9.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:edd14cf733fdc4f6e6fe3f705af97676a7e52859bf0044aa2c84e55be739241c"}, - {file = "matplotlib-3.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:1c40c244221a1adbb1256692b1133c6fb89418df27bf759a31a333e7912a4010"}, - {file = "matplotlib-3.9.3-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:cf2a60daf6cecff6828bc608df00dbc794380e7234d2411c0ec612811f01969d"}, - {file = "matplotlib-3.9.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:213d6dc25ce686516208d8a3e91120c6a4fdae4a3e06b8505ced5b716b50cc04"}, - {file = "matplotlib-3.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c52f48eb75fcc119a4fdb68ba83eb5f71656999420375df7c94cc68e0e14686e"}, - {file = "matplotlib-3.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d3c93796b44fa111049b88a24105e947f03c01966b5c0cc782e2ee3887b790a3"}, - {file = "matplotlib-3.9.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cd1077b9a09b16d8c3c7075a8add5ffbfe6a69156a57e290c800ed4d435bef1d"}, - {file = "matplotlib-3.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:c96eeeb8c68b662c7747f91a385688d4b449687d29b691eff7068a4602fe6dc4"}, - {file = "matplotlib-3.9.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:0a361bd5583bf0bcc08841df3c10269617ee2a36b99ac39d455a767da908bbbc"}, - {file = "matplotlib-3.9.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:e14485bb1b83eeb3d55b6878f9560240981e7bbc7a8d4e1e8c38b9bd6ec8d2de"}, - {file = "matplotlib-3.9.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a8d279f78844aad213c4935c18f8292a9432d51af2d88bca99072c903948045"}, - {file = "matplotlib-3.9.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6c12514329ac0d03128cf1dcceb335f4fbf7c11da98bca68dca8dcb983153a9"}, - {file = "matplotlib-3.9.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6e9de2b390d253a508dd497e9b5579f3a851f208763ed67fdca5dc0c3ea6849c"}, - {file = "matplotlib-3.9.3-cp312-cp312-win_amd64.whl", hash = "sha256:d796272408f8567ff7eaa00eb2856b3a00524490e47ad505b0b4ca6bb8a7411f"}, - {file = "matplotlib-3.9.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:203d18df84f5288973b2d56de63d4678cc748250026ca9e1ad8f8a0fd8a75d83"}, - {file = "matplotlib-3.9.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b651b0d3642991259109dc0351fc33ad44c624801367bb8307be9bfc35e427ad"}, - {file = "matplotlib-3.9.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:66d7b171fecf96940ce069923a08ba3df33ef542de82c2ff4fe8caa8346fa95a"}, - {file = "matplotlib-3.9.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be0ba61f6ff2e6b68e4270fb63b6813c9e7dec3d15fc3a93f47480444fd72f0"}, - {file = "matplotlib-3.9.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9d6b2e8856dec3a6db1ae51aec85c82223e834b228c1d3228aede87eee2b34f9"}, - {file = 
"matplotlib-3.9.3-cp313-cp313-win_amd64.whl", hash = "sha256:90a85a004fefed9e583597478420bf904bb1a065b0b0ee5b9d8d31b04b0f3f70"}, - {file = "matplotlib-3.9.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:3119b2f16de7f7b9212ba76d8fe6a0e9f90b27a1e04683cd89833a991682f639"}, - {file = "matplotlib-3.9.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:87ad73763d93add1b6c1f9fcd33af662fd62ed70e620c52fcb79f3ac427cf3a6"}, - {file = "matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:026bdf3137ab6022c866efa4813b6bbeddc2ed4c9e7e02f0e323a7bca380dfa0"}, - {file = "matplotlib-3.9.3-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:760a5e89ebbb172989e8273024a1024b0f084510b9105261b3b00c15e9c9f006"}, - {file = "matplotlib-3.9.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:a42b9dc42de2cfe357efa27d9c50c7833fc5ab9b2eb7252ccd5d5f836a84e1e4"}, - {file = "matplotlib-3.9.3-cp313-cp313t-win_amd64.whl", hash = "sha256:e0fcb7da73fbf67b5f4bdaa57d85bb585a4e913d4a10f3e15b32baea56a67f0a"}, - {file = "matplotlib-3.9.3-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:031b7f5b8e595cc07def77ec5b58464e9bb67dc5760be5d6f26d9da24892481d"}, - {file = "matplotlib-3.9.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9fa6e193c14d6944e0685cdb527cb6b38b0e4a518043e7212f214113af7391da"}, - {file = "matplotlib-3.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e6eefae6effa0c35bbbc18c25ee6e0b1da44d2359c3cd526eb0c9e703cf055d"}, - {file = "matplotlib-3.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:10d3e5c7a99bd28afb957e1ae661323b0800d75b419f24d041ed1cc5d844a764"}, - {file = "matplotlib-3.9.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:816a966d5d376bf24c92af8f379e78e67278833e4c7cbc9fa41872eec629a060"}, - {file = "matplotlib-3.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fb0b37c896172899a4a93d9442ffdc6f870165f59e05ce2e07c6fded1c15749"}, - {file = "matplotlib-3.9.3-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:5f2a4ea08e6876206d511365b0bc234edc813d90b930be72c3011bbd7898796f"}, - {file = "matplotlib-3.9.3-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:9b081dac96ab19c54fd8558fac17c9d2c9cb5cc4656e7ed3261ddc927ba3e2c5"}, - {file = "matplotlib-3.9.3-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a0a63cb8404d1d1f94968ef35738900038137dab8af836b6c21bb6f03d75465"}, - {file = "matplotlib-3.9.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:896774766fd6be4571a43bc2fcbcb1dcca0807e53cab4a5bf88c4aa861a08e12"}, - {file = "matplotlib-3.9.3.tar.gz", hash = "sha256:cd5dbbc8e25cad5f706845c4d100e2c8b34691b412b93717ce38d8ae803bcfa5"}, + {file = "matplotlib-3.9.4-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:c5fdd7abfb706dfa8d307af64a87f1a862879ec3cd8d0ec8637458f0885b9c50"}, + {file = "matplotlib-3.9.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d89bc4e85e40a71d1477780366c27fb7c6494d293e1617788986f74e2a03d7ff"}, + {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ddf9f3c26aae695c5daafbf6b94e4c1a30d6cd617ba594bbbded3b33a1fcfa26"}, + {file = "matplotlib-3.9.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:18ebcf248030173b59a868fda1fe42397253f6698995b55e81e1f57431d85e50"}, + {file = "matplotlib-3.9.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:974896ec43c672ec23f3f8c648981e8bc880ee163146e0312a9b8def2fac66f5"}, + {file = 
"matplotlib-3.9.4-cp310-cp310-win_amd64.whl", hash = "sha256:4598c394ae9711cec135639374e70871fa36b56afae17bdf032a345be552a88d"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:d4dd29641d9fb8bc4492420c5480398dd40a09afd73aebe4eb9d0071a05fbe0c"}, + {file = "matplotlib-3.9.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30e5b22e8bcfb95442bf7d48b0d7f3bdf4a450cbf68986ea45fca3d11ae9d099"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2bb0030d1d447fd56dcc23b4c64a26e44e898f0416276cac1ebc25522e0ac249"}, + {file = "matplotlib-3.9.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aca90ed222ac3565d2752b83dbb27627480d27662671e4d39da72e97f657a423"}, + {file = "matplotlib-3.9.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a181b2aa2906c608fcae72f977a4a2d76e385578939891b91c2550c39ecf361e"}, + {file = "matplotlib-3.9.4-cp311-cp311-win_amd64.whl", hash = "sha256:1f6882828231eca17f501c4dcd98a05abb3f03d157fbc0769c6911fe08b6cfd3"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:dfc48d67e6661378a21c2983200a654b72b5c5cdbd5d2cf6e5e1ece860f0cc70"}, + {file = "matplotlib-3.9.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:47aef0fab8332d02d68e786eba8113ffd6f862182ea2999379dec9e237b7e483"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fba1f52c6b7dc764097f52fd9ab627b90db452c9feb653a59945de16752e965f"}, + {file = "matplotlib-3.9.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:173ac3748acaac21afcc3fa1633924609ba1b87749006bc25051c52c422a5d00"}, + {file = "matplotlib-3.9.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:320edea0cadc07007765e33f878b13b3738ffa9745c5f707705692df70ffe0e0"}, + {file = "matplotlib-3.9.4-cp312-cp312-win_amd64.whl", hash = "sha256:a4a4cfc82330b27042a7169533da7991e8789d180dd5b3daeaee57d75cd5a03b"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:37eeffeeca3c940985b80f5b9a7b95ea35671e0e7405001f249848d2b62351b6"}, + {file = "matplotlib-3.9.4-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3e7465ac859ee4abcb0d836137cd8414e7bb7ad330d905abced457217d4f0f45"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4c12302c34afa0cf061bea23b331e747e5e554b0fa595c96e01c7b75bc3b858"}, + {file = "matplotlib-3.9.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b8c97917f21b75e72108b97707ba3d48f171541a74aa2a56df7a40626bafc64"}, + {file = "matplotlib-3.9.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0229803bd7e19271b03cb09f27db76c918c467aa4ce2ae168171bc67c3f508df"}, + {file = "matplotlib-3.9.4-cp313-cp313-win_amd64.whl", hash = "sha256:7c0d8ef442ebf56ff5e206f8083d08252ee738e04f3dc88ea882853a05488799"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a04c3b00066a688834356d196136349cb32f5e1003c55ac419e91585168b88fb"}, + {file = "matplotlib-3.9.4-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:04c519587f6c210626741a1e9a68eefc05966ede24205db8982841826af5871a"}, + {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:308afbf1a228b8b525fcd5cec17f246bbbb63b175a3ef6eb7b4d33287ca0cf0c"}, + {file = "matplotlib-3.9.4-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:ddb3b02246ddcffd3ce98e88fed5b238bc5faff10dbbaa42090ea13241d15764"}, + {file = "matplotlib-3.9.4-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8a75287e9cb9eee48cb79ec1d806f75b29c0fde978cb7223a1f4c5848d696041"}, + {file = "matplotlib-3.9.4-cp313-cp313t-win_amd64.whl", hash = "sha256:488deb7af140f0ba86da003e66e10d55ff915e152c78b4b66d231638400b1965"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:3c3724d89a387ddf78ff88d2a30ca78ac2b4c89cf37f2db4bd453c34799e933c"}, + {file = "matplotlib-3.9.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d5f0a8430ffe23d7e32cfd86445864ccad141797f7d25b7c41759a5b5d17cfd7"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6bb0141a21aef3b64b633dc4d16cbd5fc538b727e4958be82a0e1c92a234160e"}, + {file = "matplotlib-3.9.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:57aa235109e9eed52e2c2949db17da185383fa71083c00c6c143a60e07e0888c"}, + {file = "matplotlib-3.9.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:b18c600061477ccfdd1e6fd050c33d8be82431700f3452b297a56d9ed7037abb"}, + {file = "matplotlib-3.9.4-cp39-cp39-win_amd64.whl", hash = "sha256:ef5f2d1b67d2d2145ff75e10f8c008bfbf71d45137c4b648c87193e7dd053eac"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:44e0ed786d769d85bc787b0606a53f2d8d2d1d3c8a2608237365e9121c1a338c"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:09debb9ce941eb23ecdbe7eab972b1c3e0276dcf01688073faff7b0f61d6c6ca"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc53cf157a657bfd03afab14774d54ba73aa84d42cfe2480c91bd94873952db"}, + {file = "matplotlib-3.9.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ad45da51be7ad02387801fd154ef74d942f49fe3fcd26a64c94842ba7ec0d865"}, + {file = "matplotlib-3.9.4.tar.gz", hash = "sha256:1e00e8be7393cbdc6fedfa8a6fba02cf3e83814b285db1c60b906a023ba41bc3"}, ] [package.dependencies] @@ -2171,7 +2258,21 @@ pyparsing = ">=2.3.1" python-dateutil = ">=2.7" [package.extras] -dev = ["meson-python (>=0.13.1)", "numpy (>=1.25)", "pybind11 (>=2.6,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] +dev = ["meson-python (>=0.13.1,<0.17.0)", "numpy (>=1.25)", "pybind11 (>=2.6,!=2.13.3)", "setuptools (>=64)", "setuptools_scm (>=7)"] + +[[package]] +name = "matplotlib-inline" +version = "0.1.7" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, + {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, +] + +[package.dependencies] +traitlets = "*" [[package]] name = "mpmath" @@ -2354,49 +2455,55 @@ dill = ">=0.3.8" [[package]] name = "mypy" -version = "1.13.0" +version = "1.14.1" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, - {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, - {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, - {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, - {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, - {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, - {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, - {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, - {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, - {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, - {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, - {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, - {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, - {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, - {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, - {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, - {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, - {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, - {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, - {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, - {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, - {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, - {file = 
"mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, - {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, - {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, - {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, - {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, - {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:52686e37cf13d559f668aa398dd7ddf1f92c5d613e4f8cb262be2fb4fedb0fcb"}, + {file = "mypy-1.14.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1fb545ca340537d4b45d3eecdb3def05e913299ca72c290326be19b3804b39c0"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:90716d8b2d1f4cd503309788e51366f07c56635a3309b0f6a32547eaaa36a64d"}, + {file = "mypy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ae753f5c9fef278bcf12e1a564351764f2a6da579d4a81347e1d5a15819997b"}, + {file = "mypy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:e0fe0f5feaafcb04505bcf439e991c6d8f1bf8b15f12b05feeed96e9e7bf1427"}, + {file = "mypy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:7d54bd85b925e501c555a3227f3ec0cfc54ee8b6930bd6141ec872d1c572f81f"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f995e511de847791c3b11ed90084a7a0aafdc074ab88c5a9711622fe4751138c"}, + {file = "mypy-1.14.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d64169ec3b8461311f8ce2fd2eb5d33e2d0f2c7b49116259c51d0d96edee48d1"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba24549de7b89b6381b91fbc068d798192b1b5201987070319889e93038967a8"}, + {file = "mypy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:183cf0a45457d28ff9d758730cd0210419ac27d4d3f285beda038c9083363b1f"}, + {file = "mypy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f2a0ecc86378f45347f586e4163d1769dd81c5a223d577fe351f26b179e148b1"}, + {file = "mypy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:ad3301ebebec9e8ee7135d8e3109ca76c23752bac1e717bc84cd3836b4bf3eae"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:30ff5ef8519bbc2e18b3b54521ec319513a26f1bba19a7582e7b1f58a6e69f14"}, + {file = "mypy-1.14.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:cb9f255c18052343c70234907e2e532bc7e55a62565d64536dbc7706a20b78b9"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8b4e3413e0bddea671012b063e27591b953d653209e7a4fa5e48759cda77ca11"}, + {file = "mypy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:553c293b1fbdebb6c3c4030589dab9fafb6dfa768995a453d8a5d3b23784af2e"}, + {file = "mypy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fad79bfe3b65fe6a1efaed97b445c3d37f7be9fdc348bdb2d7cac75579607c89"}, + {file = 
"mypy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:8fa2220e54d2946e94ab6dbb3ba0a992795bd68b16dc852db33028df2b00191b"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:92c3ed5afb06c3a8e188cb5da4984cab9ec9a77ba956ee419c68a388b4595255"}, + {file = "mypy-1.14.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:dbec574648b3e25f43d23577309b16534431db4ddc09fda50841f1e34e64ed34"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8c6d94b16d62eb3e947281aa7347d78236688e21081f11de976376cf010eb31a"}, + {file = "mypy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d4b19b03fdf54f3c5b2fa474c56b4c13c9dbfb9a2db4370ede7ec11a2c5927d9"}, + {file = "mypy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:0c911fde686394753fff899c409fd4e16e9b294c24bfd5e1ea4675deae1ac6fd"}, + {file = "mypy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:8b21525cb51671219f5307be85f7e646a153e5acc656e5cebf64bfa076c50107"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:7084fb8f1128c76cd9cf68fe5971b37072598e7c31b2f9f95586b65c741a9d31"}, + {file = "mypy-1.14.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f845a00b4f420f693f870eaee5f3e2692fa84cc8514496114649cfa8fd5e2c6"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:44bf464499f0e3a2d14d58b54674dee25c031703b2ffc35064bd0df2e0fac319"}, + {file = "mypy-1.14.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c99f27732c0b7dc847adb21c9d47ce57eb48fa33a17bc6d7d5c5e9f9e7ae5bac"}, + {file = "mypy-1.14.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:bce23c7377b43602baa0bd22ea3265c49b9ff0b76eb315d6c34721af4cdf1d9b"}, + {file = "mypy-1.14.1-cp38-cp38-win_amd64.whl", hash = "sha256:8edc07eeade7ebc771ff9cf6b211b9a7d93687ff892150cb5692e4f4272b0837"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3888a1816d69f7ab92092f785a462944b3ca16d7c470d564165fe703b0970c35"}, + {file = "mypy-1.14.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:46c756a444117c43ee984bd055db99e498bc613a70bbbc120272bd13ca579fbc"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:27fc248022907e72abfd8e22ab1f10e903915ff69961174784a3900a8cba9ad9"}, + {file = "mypy-1.14.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:499d6a72fb7e5de92218db961f1a66d5f11783f9ae549d214617edab5d4dbdbb"}, + {file = "mypy-1.14.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:57961db9795eb566dc1d1b4e9139ebc4c6b0cb6e7254ecde69d1552bf7613f60"}, + {file = "mypy-1.14.1-cp39-cp39-win_amd64.whl", hash = "sha256:07ba89fdcc9451f2ebb02853deb6aaaa3d2239a236669a63ab3801bbf923ef5c"}, + {file = "mypy-1.14.1-py3-none-any.whl", hash = "sha256:b66a60cc4073aeb8ae00057f9c1f64d49e90f918fbcef9a977eb121da8b8f1d1"}, + {file = "mypy-1.14.1.tar.gz", hash = "sha256:7ec88144fe9b510e8475ec2f5f251992690fcf89ccb4500b214b4226abcd32d6"}, ] [package.dependencies] -mypy-extensions = ">=1.0.0" +mypy_extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.6.0" +typing_extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] @@ -2458,47 +2565,56 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" +version = "2.0.2" description = 
"Fundamental package for array computing in Python" optional = false python-versions = ">=3.9" files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = 
"numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:51129a29dbe56f9ca83438b706e2e69a39892b5eda6cedcb6b0c9fdc9b0d3ece"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f15975dfec0cf2239224d80e32c3170b1d168335eaedee69da84fbe9f1f9cd04"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8c5713284ce4e282544c68d1c3b2c7161d38c256d2eefc93c1d683cf47683e66"}, + {file = "numpy-2.0.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:becfae3ddd30736fe1889a37f1f580e245ba79a5855bff5f2a29cb3ccc22dd7b"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2da5960c3cf0df7eafefd806d4e612c5e19358de82cb3c343631188991566ccd"}, + {file = "numpy-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:496f71341824ed9f3d2fd36cf3ac57ae2e0165c143b55c3a035ee219413f3318"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a61ec659f68ae254e4d237816e33171497e978140353c0c2038d46e63282d0c8"}, + {file = "numpy-2.0.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d731a1c6116ba289c1e9ee714b08a8ff882944d4ad631fd411106a30f083c326"}, + {file = "numpy-2.0.2-cp310-cp310-win32.whl", hash = "sha256:984d96121c9f9616cd33fbd0618b7f08e0cfc9600a7ee1d6fd9b239186d19d97"}, + {file = "numpy-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:c7b0be4ef08607dd04da4092faee0b86607f111d5ae68036f16cc787e250a131"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:49ca4decb342d66018b01932139c0961a8f9ddc7589611158cb3c27cbcf76448"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:11a76c372d1d37437857280aa142086476136a8c0f373b2e648ab2c8f18fb195"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:807ec44583fd708a21d4a11d94aedf2f4f3c3719035c76a2bbe1fe8e217bdc57"}, + {file = "numpy-2.0.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8cafab480740e22f8d833acefed5cc87ce276f4ece12fdaa2e8903db2f82897a"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a15f476a45e6e5a3a79d8a14e62161d27ad897381fecfa4a09ed5322f2085669"}, + {file = "numpy-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13e689d772146140a252c3a28501da66dfecd77490b498b168b501835041f951"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9ea91dfb7c3d1c56a0e55657c0afb38cf1eeae4544c208dc465c3c9f3a7c09f9"}, + {file = "numpy-2.0.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c1c9307701fec8f3f7a1e6711f9089c06e6284b3afbbcd259f7791282d660a15"}, + {file = "numpy-2.0.2-cp311-cp311-win32.whl", hash = "sha256:a392a68bd329eafac5817e5aefeb39038c48b671afd242710b451e76090e81f4"}, + {file = "numpy-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:286cd40ce2b7d652a6f22efdfc6d1edf879440e53e76a75955bc0c826c7e64dc"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:df55d490dea7934f330006d0f81e8551ba6010a5bf035a249ef61a94f21c500b"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8df823f570d9adf0978347d1f926b2a867d5608f434a7cff7f7908c6570dcf5e"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:9a92ae5c14811e390f3767053ff54eaee3bf84576d99a2456391401323f4ec2c"}, + {file = "numpy-2.0.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:a842d573724391493a97a62ebbb8e731f8a5dcc5d285dfc99141ca15a3302d0c"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c05e238064fc0610c840d1cf6a13bf63d7e391717d247f1bf0318172e759e692"}, + {file = "numpy-2.0.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0123ffdaa88fa4ab64835dcbde75dcdf89c453c922f18dced6e27c90d1d0ec5a"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:96a55f64139912d61de9137f11bf39a55ec8faec288c75a54f93dfd39f7eb40c"}, + {file = "numpy-2.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ec9852fb39354b5a45a80bdab5ac02dd02b15f44b3804e9f00c556bf24b4bded"}, + {file = "numpy-2.0.2-cp312-cp312-win32.whl", hash = "sha256:671bec6496f83202ed2d3c8fdc486a8fc86942f2e69ff0e986140339a63bcbe5"}, + {file = "numpy-2.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:cfd41e13fdc257aa5778496b8caa5e856dc4896d4ccf01841daee1d96465467a"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9059e10581ce4093f735ed23f3b9d283b9d517ff46009ddd485f1747eb22653c"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:423e89b23490805d2a5a96fe40ec507407b8ee786d66f7328be214f9679df6dd"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_arm64.whl", hash = "sha256:2b2955fa6f11907cf7a70dab0d0755159bca87755e831e47932367fc8f2f2d0b"}, + {file = "numpy-2.0.2-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:97032a27bd9d8988b9a97a8c4d2c9f2c15a81f61e2f21404d7e8ef00cb5be729"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e795a8be3ddbac43274f18588329c72939870a16cae810c2b73461c40718ab1"}, + {file = "numpy-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:f26b258c385842546006213344c50655ff1555a9338e2e5e02a0756dc3e803dd"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5fec9451a7789926bcf7c2b8d187292c9f93ea30284802a0ab3f5be8ab36865d"}, + {file = "numpy-2.0.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:9189427407d88ff25ecf8f12469d4d39d35bee1db5d39fc5c168c6f088a6956d"}, + {file = "numpy-2.0.2-cp39-cp39-win32.whl", hash = "sha256:905d16e0c60200656500c95b6b8dca5d109e23cb24abc701d41c02d74c6b3afa"}, + {file = "numpy-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:a3f4ab0caa7f053f6797fcd4e1e25caee367db3112ef2b6ef82d749530768c73"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:7f0a0c6f12e07fa94133c8a67404322845220c06a9e80e85999afe727f7438b8"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-macosx_14_0_x86_64.whl", hash = "sha256:312950fdd060354350ed123c0e25a71327d3711584beaef30cdaa93320c392d4"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26df23238872200f63518dd2aa984cfca675d82469535dc7162dc2ee52d9dd5c"}, + {file = "numpy-2.0.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a46288ec55ebbd58947d31d72be2c63cbf839f0a63b49cb755022310792a3385"}, + {file = "numpy-2.0.2.tar.gz", hash = "sha256:883c987dee1880e2a864ab0dc9892292582510604156762362d9326444636e78"}, ] [[package]] @@ -2672,13 +2788,13 @@ httpx = ">=0.27.0,<0.28.0" [[package]] name = "openai" -version = "1.57.1" +version = "1.59.7" description = "The official Python library for the openai API" optional = false python-versions = ">=3.8" files = [ - {file = "openai-1.57.1-py3-none-any.whl", hash = "sha256:3865686c927e93492d1145938d4a24b634951531c4b2769d43ca5dbd4b25d8fd"}, - {file = "openai-1.57.1.tar.gz", hash = "sha256:a95f22e04ab3df26e64a15d958342265e802314131275908b3b3e36f8c5d4377"}, + {file = "openai-1.59.7-py3-none-any.whl", hash = "sha256:cfa806556226fa96df7380ab2e29814181d56fea44738c2b0e581b462c268692"}, + {file = "openai-1.59.7.tar.gz", hash = "sha256:043603def78c00befb857df9f0a16ee76a3af5984ba40cb7ee5e2f40db4646bf"}, ] [package.dependencies] @@ -2693,6 +2809,7 @@ typing-extensions = ">=4.11,<5" [package.extras] datalib = ["numpy (>=1)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"] +realtime = ["websockets (>=13,<15)"] [[package]] name = "overrides" @@ -2816,6 +2933,35 @@ files = [ [package.extras] dev = ["jinja2"] +[[package]] +name = "parso" +version = "0.8.4" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, + {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, +] + +[package.extras] +qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] +testing = ["docopt", "pytest"] + +[[package]] +name = "pexpect" +version = "4.9.0" +description = "Pexpect allows easy control of interactive console applications." 
+optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, + {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + [[package]] name = "pgvector" version = "0.3.6" @@ -2832,93 +2978,89 @@ numpy = "*" [[package]] name = "pillow" -version = "11.0.0" +version = "11.1.0" description = "Python Imaging Library (Fork)" optional = false python-versions = ">=3.9" files = [ - {file = "pillow-11.0.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:6619654954dc4936fcff82db8eb6401d3159ec6be81e33c6000dfd76ae189947"}, - {file = "pillow-11.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b3c5ac4bed7519088103d9450a1107f76308ecf91d6dabc8a33a2fcfb18d0fba"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a65149d8ada1055029fcb665452b2814fe7d7082fcb0c5bed6db851cb69b2086"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:88a58d8ac0cc0e7f3a014509f0455248a76629ca9b604eca7dc5927cc593c5e9"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:c26845094b1af3c91852745ae78e3ea47abf3dbcd1cf962f16b9a5fbe3ee8488"}, - {file = "pillow-11.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:1a61b54f87ab5786b8479f81c4b11f4d61702830354520837f8cc791ebba0f5f"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:674629ff60030d144b7bca2b8330225a9b11c482ed408813924619c6f302fdbb"}, - {file = "pillow-11.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:598b4e238f13276e0008299bd2482003f48158e2b11826862b1eb2ad7c768b97"}, - {file = "pillow-11.0.0-cp310-cp310-win32.whl", hash = "sha256:9a0f748eaa434a41fccf8e1ee7a3eed68af1b690e75328fd7a60af123c193b50"}, - {file = "pillow-11.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:a5629742881bcbc1f42e840af185fd4d83a5edeb96475a575f4da50d6ede337c"}, - {file = "pillow-11.0.0-cp310-cp310-win_arm64.whl", hash = "sha256:ee217c198f2e41f184f3869f3e485557296d505b5195c513b2bfe0062dc537f1"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1c1d72714f429a521d8d2d018badc42414c3077eb187a59579f28e4270b4b0fc"}, - {file = "pillow-11.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:499c3a1b0d6fc8213519e193796eb1a86a1be4b1877d678b30f83fd979811d1a"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8b2351c85d855293a299038e1f89db92a2f35e8d2f783489c6f0b2b5f3fe8a3"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f4dba50cfa56f910241eb7f883c20f1e7b1d8f7d91c750cd0b318bad443f4d5"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:5ddbfd761ee00c12ee1be86c9c0683ecf5bb14c9772ddbd782085779a63dd55b"}, - {file = "pillow-11.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:45c566eb10b8967d71bf1ab8e4a525e5a93519e29ea071459ce517f6b903d7fa"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b4fd7bd29610a83a8c9b564d457cf5bd92b4e11e79a4ee4716a63c959699b306"}, - {file = "pillow-11.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cb929ca942d0ec4fac404cbf520ee6cac37bf35be479b970c4ffadf2b6a1cad9"}, - {file = "pillow-11.0.0-cp311-cp311-win32.whl", hash = 
"sha256:006bcdd307cc47ba43e924099a038cbf9591062e6c50e570819743f5607404f5"}, - {file = "pillow-11.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:52a2d8323a465f84faaba5236567d212c3668f2ab53e1c74c15583cf507a0291"}, - {file = "pillow-11.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:16095692a253047fe3ec028e951fa4221a1f3ed3d80c397e83541a3037ff67c9"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d2c0a187a92a1cb5ef2c8ed5412dd8d4334272617f532d4ad4de31e0495bd923"}, - {file = "pillow-11.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:084a07ef0821cfe4858fe86652fffac8e187b6ae677e9906e192aafcc1b69903"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8069c5179902dcdce0be9bfc8235347fdbac249d23bd90514b7a47a72d9fecf4"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f02541ef64077f22bf4924f225c0fd1248c168f86e4b7abdedd87d6ebaceab0f"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:fcb4621042ac4b7865c179bb972ed0da0218a076dc1820ffc48b1d74c1e37fe9"}, - {file = "pillow-11.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:00177a63030d612148e659b55ba99527803288cea7c75fb05766ab7981a8c1b7"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8853a3bf12afddfdf15f57c4b02d7ded92c7a75a5d7331d19f4f9572a89c17e6"}, - {file = "pillow-11.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3107c66e43bda25359d5ef446f59c497de2b5ed4c7fdba0894f8d6cf3822dafc"}, - {file = "pillow-11.0.0-cp312-cp312-win32.whl", hash = "sha256:86510e3f5eca0ab87429dd77fafc04693195eec7fd6a137c389c3eeb4cfb77c6"}, - {file = "pillow-11.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:8ec4a89295cd6cd4d1058a5e6aec6bf51e0eaaf9714774e1bfac7cfc9051db47"}, - {file = "pillow-11.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:27a7860107500d813fcd203b4ea19b04babe79448268403172782754870dac25"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:bcd1fb5bb7b07f64c15618c89efcc2cfa3e95f0e3bcdbaf4642509de1942a699"}, - {file = "pillow-11.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0e038b0745997c7dcaae350d35859c9715c71e92ffb7e0f4a8e8a16732150f38"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0ae08bd8ffc41aebf578c2af2f9d8749d91f448b3bfd41d7d9ff573d74f2a6b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d69bfd8ec3219ae71bcde1f942b728903cad25fafe3100ba2258b973bd2bc1b2"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:61b887f9ddba63ddf62fd02a3ba7add935d053b6dd7d58998c630e6dbade8527"}, - {file = "pillow-11.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:c6a660307ca9d4867caa8d9ca2c2658ab685de83792d1876274991adec7b93fa"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:73e3a0200cdda995c7e43dd47436c1548f87a30bb27fb871f352a22ab8dcf45f"}, - {file = "pillow-11.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fba162b8872d30fea8c52b258a542c5dfd7b235fb5cb352240c8d63b414013eb"}, - {file = "pillow-11.0.0-cp313-cp313-win32.whl", hash = "sha256:f1b82c27e89fffc6da125d5eb0ca6e68017faf5efc078128cfaa42cf5cb38798"}, - {file = "pillow-11.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:8ba470552b48e5835f1d23ecb936bb7f71d206f9dfeee64245f30c3270b994de"}, - {file = "pillow-11.0.0-cp313-cp313-win_arm64.whl", hash = 
"sha256:846e193e103b41e984ac921b335df59195356ce3f71dcfd155aa79c603873b84"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:4ad70c4214f67d7466bea6a08061eba35c01b1b89eaa098040a35272a8efb22b"}, - {file = "pillow-11.0.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:6ec0d5af64f2e3d64a165f490d96368bb5dea8b8f9ad04487f9ab60dc4bb6003"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c809a70e43c7977c4a42aefd62f0131823ebf7dd73556fa5d5950f5b354087e2"}, - {file = "pillow-11.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:4b60c9520f7207aaf2e1d94de026682fc227806c6e1f55bba7606d1c94dd623a"}, - {file = "pillow-11.0.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:1e2688958a840c822279fda0086fec1fdab2f95bf2b717b66871c4ad9859d7e8"}, - {file = "pillow-11.0.0-cp313-cp313t-win32.whl", hash = "sha256:607bbe123c74e272e381a8d1957083a9463401f7bd01287f50521ecb05a313f8"}, - {file = "pillow-11.0.0-cp313-cp313t-win_amd64.whl", hash = "sha256:5c39ed17edea3bc69c743a8dd3e9853b7509625c2462532e62baa0732163a904"}, - {file = "pillow-11.0.0-cp313-cp313t-win_arm64.whl", hash = "sha256:75acbbeb05b86bc53cbe7b7e6fe00fbcf82ad7c684b3ad82e3d711da9ba287d3"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:2e46773dc9f35a1dd28bd6981332fd7f27bec001a918a72a79b4133cf5291dba"}, - {file = "pillow-11.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2679d2258b7f1192b378e2893a8a0a0ca472234d4c2c0e6bdd3380e8dfa21b6a"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eda2616eb2313cbb3eebbe51f19362eb434b18e3bb599466a1ffa76a033fb916"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ec184af98a121fb2da42642dea8a29ec80fc3efbaefb86d8fdd2606619045d"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:8594f42df584e5b4bb9281799698403f7af489fba84c34d53d1c4bfb71b7c4e7"}, - {file = "pillow-11.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:c12b5ae868897c7338519c03049a806af85b9b8c237b7d675b8c5e089e4a618e"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:70fbbdacd1d271b77b7721fe3cdd2d537bbbd75d29e6300c672ec6bb38d9672f"}, - {file = "pillow-11.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:5178952973e588b3f1360868847334e9e3bf49d19e169bbbdfaf8398002419ae"}, - {file = "pillow-11.0.0-cp39-cp39-win32.whl", hash = "sha256:8c676b587da5673d3c75bd67dd2a8cdfeb282ca38a30f37950511766b26858c4"}, - {file = "pillow-11.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:94f3e1780abb45062287b4614a5bc0874519c86a777d4a7ad34978e86428b8dd"}, - {file = "pillow-11.0.0-cp39-cp39-win_arm64.whl", hash = "sha256:290f2cc809f9da7d6d622550bbf4c1e57518212da51b6a30fe8e0a270a5b78bd"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:1187739620f2b365de756ce086fdb3604573337cc28a0d3ac4a01ab6b2d2a6d2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:fbbcb7b57dc9c794843e3d1258c0fbf0f48656d46ffe9e09b63bbd6e8cd5d0a2"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d203af30149ae339ad1b4f710d9844ed8796e97fda23ffbc4cc472968a47d0b"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21a0d3b115009ebb8ac3d2ebec5c2982cc693da935f4ab7bb5c8ebe2f47d36f2"}, - {file = 
"pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:73853108f56df97baf2bb8b522f3578221e56f646ba345a372c78326710d3830"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e58876c91f97b0952eb766123bfef372792ab3f4e3e1f1a2267834c2ab131734"}, - {file = "pillow-11.0.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:224aaa38177597bb179f3ec87eeefcce8e4f85e608025e9cfac60de237ba6316"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:5bd2d3bdb846d757055910f0a59792d33b555800813c3b39ada1829c372ccb06"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:375b8dd15a1f5d2feafff536d47e22f69625c1aa92f12b339ec0b2ca40263273"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:daffdf51ee5db69a82dd127eabecce20729e21f7a3680cf7cbb23f0829189790"}, - {file = "pillow-11.0.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7326a1787e3c7b0429659e0a944725e1b03eeaa10edd945a86dead1913383944"}, - {file = "pillow-11.0.0.tar.gz", hash = "sha256:72bacbaf24ac003fea9bff9837d1eedb6088758d41e100c1552930151f677739"}, + {file = "pillow-11.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:e1abe69aca89514737465752b4bcaf8016de61b3be1397a8fc260ba33321b3a8"}, + {file = "pillow-11.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c640e5a06869c75994624551f45e5506e4256562ead981cce820d5ab39ae2192"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a07dba04c5e22824816b2615ad7a7484432d7f540e6fa86af60d2de57b0fcee2"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e267b0ed063341f3e60acd25c05200df4193e15a4a5807075cd71225a2386e26"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:bd165131fd51697e22421d0e467997ad31621b74bfc0b75956608cb2906dda07"}, + {file = "pillow-11.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:abc56501c3fd148d60659aae0af6ddc149660469082859fa7b066a298bde9482"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:54ce1c9a16a9561b6d6d8cb30089ab1e5eb66918cb47d457bd996ef34182922e"}, + {file = "pillow-11.1.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:73ddde795ee9b06257dac5ad42fcb07f3b9b813f8c1f7f870f402f4dc54b5269"}, + {file = "pillow-11.1.0-cp310-cp310-win32.whl", hash = "sha256:3a5fe20a7b66e8135d7fd617b13272626a28278d0e578c98720d9ba4b2439d49"}, + {file = "pillow-11.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b6123aa4a59d75f06e9dd3dac5bf8bc9aa383121bb3dd9a7a612e05eabc9961a"}, + {file = "pillow-11.1.0-cp310-cp310-win_arm64.whl", hash = "sha256:a76da0a31da6fcae4210aa94fd779c65c75786bc9af06289cd1c184451ef7a65"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:e06695e0326d05b06833b40b7ef477e475d0b1ba3a6d27da1bb48c23209bf457"}, + {file = "pillow-11.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:96f82000e12f23e4f29346e42702b6ed9a2f2fea34a740dd5ffffcc8c539eb35"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3cd561ded2cf2bbae44d4605837221b987c216cff94f49dfeed63488bb228d2"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f189805c8be5ca5add39e6f899e6ce2ed824e65fb45f3c28cb2841911da19070"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = 
"sha256:dd0052e9db3474df30433f83a71b9b23bd9e4ef1de13d92df21a52c0303b8ab6"}, + {file = "pillow-11.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:837060a8599b8f5d402e97197d4924f05a2e0d68756998345c829c33186217b1"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:aa8dd43daa836b9a8128dbe7d923423e5ad86f50a7a14dc688194b7be5c0dea2"}, + {file = "pillow-11.1.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:0a2f91f8a8b367e7a57c6e91cd25af510168091fb89ec5146003e424e1558a96"}, + {file = "pillow-11.1.0-cp311-cp311-win32.whl", hash = "sha256:c12fc111ef090845de2bb15009372175d76ac99969bdf31e2ce9b42e4b8cd88f"}, + {file = "pillow-11.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:fbd43429d0d7ed6533b25fc993861b8fd512c42d04514a0dd6337fb3ccf22761"}, + {file = "pillow-11.1.0-cp311-cp311-win_arm64.whl", hash = "sha256:f7955ecf5609dee9442cbface754f2c6e541d9e6eda87fad7f7a989b0bdb9d71"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2062ffb1d36544d42fcaa277b069c88b01bb7298f4efa06731a7fd6cc290b81a"}, + {file = "pillow-11.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a85b653980faad27e88b141348707ceeef8a1186f75ecc600c395dcac19f385b"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9409c080586d1f683df3f184f20e36fb647f2e0bc3988094d4fd8c9f4eb1b3b3"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7fdadc077553621911f27ce206ffcbec7d3f8d7b50e0da39f10997e8e2bb7f6a"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:93a18841d09bcdd774dcdc308e4537e1f867b3dec059c131fde0327899734aa1"}, + {file = "pillow-11.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9aa9aeddeed452b2f616ff5507459e7bab436916ccb10961c4a382cd3e03f47f"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3cdcdb0b896e981678eee140d882b70092dac83ac1cdf6b3a60e2216a73f2b91"}, + {file = "pillow-11.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:36ba10b9cb413e7c7dfa3e189aba252deee0602c86c309799da5a74009ac7a1c"}, + {file = "pillow-11.1.0-cp312-cp312-win32.whl", hash = "sha256:cfd5cd998c2e36a862d0e27b2df63237e67273f2fc78f47445b14e73a810e7e6"}, + {file = "pillow-11.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:a697cd8ba0383bba3d2d3ada02b34ed268cb548b369943cd349007730c92bddf"}, + {file = "pillow-11.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:4dd43a78897793f60766563969442020e90eb7847463eca901e41ba186a7d4a5"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ae98e14432d458fc3de11a77ccb3ae65ddce70f730e7c76140653048c71bfcbc"}, + {file = "pillow-11.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cc1331b6d5a6e144aeb5e626f4375f5b7ae9934ba620c0ac6b3e43d5e683a0f0"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:758e9d4ef15d3560214cddbc97b8ef3ef86ce04d62ddac17ad39ba87e89bd3b1"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b523466b1a31d0dcef7c5be1f20b942919b62fd6e9a9be199d035509cbefc0ec"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:9044b5e4f7083f209c4e35aa5dd54b1dd5b112b108648f5c902ad586d4f945c5"}, + {file = "pillow-11.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:3764d53e09cdedd91bee65c2527815d315c6b90d7b8b79759cc48d7bf5d4f114"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:31eba6bbdd27dde97b0174ddf0297d7a9c3a507a8a1480e1e60ef914fe23d352"}, + {file = "pillow-11.1.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:b5d658fbd9f0d6eea113aea286b21d3cd4d3fd978157cbf2447a6035916506d3"}, + {file = "pillow-11.1.0-cp313-cp313-win32.whl", hash = "sha256:f86d3a7a9af5d826744fabf4afd15b9dfef44fe69a98541f666f66fbb8d3fef9"}, + {file = "pillow-11.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:593c5fd6be85da83656b93ffcccc2312d2d149d251e98588b14fbc288fd8909c"}, + {file = "pillow-11.1.0-cp313-cp313-win_arm64.whl", hash = "sha256:11633d58b6ee5733bde153a8dafd25e505ea3d32e261accd388827ee987baf65"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:70ca5ef3b3b1c4a0812b5c63c57c23b63e53bc38e758b37a951e5bc466449861"}, + {file = "pillow-11.1.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8000376f139d4d38d6851eb149b321a52bb8893a88dae8ee7d95840431977081"}, + {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ee85f0696a17dd28fbcfceb59f9510aa71934b483d1f5601d1030c3c8304f3c"}, + {file = "pillow-11.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:dd0e081319328928531df7a0e63621caf67652c8464303fd102141b785ef9547"}, + {file = "pillow-11.1.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:e63e4e5081de46517099dc30abe418122f54531a6ae2ebc8680bcd7096860eab"}, + {file = "pillow-11.1.0-cp313-cp313t-win32.whl", hash = "sha256:dda60aa465b861324e65a78c9f5cf0f4bc713e4309f83bc387be158b077963d9"}, + {file = "pillow-11.1.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ad5db5781c774ab9a9b2c4302bbf0c1014960a0a7be63278d13ae6fdf88126fe"}, + {file = "pillow-11.1.0-cp313-cp313t-win_arm64.whl", hash = "sha256:67cd427c68926108778a9005f2a04adbd5e67c442ed21d95389fe1d595458756"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:bf902d7413c82a1bfa08b06a070876132a5ae6b2388e2712aab3a7cbc02205c6"}, + {file = "pillow-11.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c1eec9d950b6fe688edee07138993e54ee4ae634c51443cfb7c1e7613322718e"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e275ee4cb11c262bd108ab2081f750db2a1c0b8c12c1897f27b160c8bd57bbc"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4db853948ce4e718f2fc775b75c37ba2efb6aaea41a1a5fc57f0af59eee774b2"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:ab8a209b8485d3db694fa97a896d96dd6533d63c22829043fd9de627060beade"}, + {file = "pillow-11.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:54251ef02a2309b5eec99d151ebf5c9904b77976c8abdcbce7891ed22df53884"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5bb94705aea800051a743aa4874bb1397d4695fb0583ba5e425ee0328757f196"}, + {file = "pillow-11.1.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89dbdb3e6e9594d512780a5a1c42801879628b38e3efc7038094430844e271d8"}, + {file = "pillow-11.1.0-cp39-cp39-win32.whl", hash = "sha256:e5449ca63da169a2e6068dd0e2fcc8d91f9558aba89ff6d02121ca8ab11e79e5"}, + {file = "pillow-11.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:3362c6ca227e65c54bf71a5f88b3d4565ff1bcbc63ae72c34b07bbb1cc59a43f"}, + {file = "pillow-11.1.0-cp39-cp39-win_arm64.whl", hash = "sha256:b20be51b37a75cc54c2c55def3fa2c65bb94ba859dde241cd0a4fd302de5ae0a"}, + {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:8c730dc3a83e5ac137fbc92dfcfe1511ce3b2b5d7578315b63dbbb76f7f51d90"}, + 
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:7d33d2fae0e8b170b6a6c57400e077412240f6f5bb2a342cf1ee512a787942bb"},
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a8d65b38173085f24bc07f8b6c505cbb7418009fa1a1fcb111b1f4961814a442"},
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:015c6e863faa4779251436db398ae75051469f7c903b043a48f078e437656f83"},
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d44ff19eea13ae4acdaaab0179fa68c0c6f2f45d66a4d8ec1eda7d6cecbcc15f"},
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:d3d8da4a631471dfaf94c10c85f5277b1f8e42ac42bade1ac67da4b4a7359b73"},
+    {file = "pillow-11.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:4637b88343166249fe8aa94e7c4a62a180c4b3898283bb5d3d2fd5fe10d8e4e0"},
+    {file = "pillow-11.1.0.tar.gz", hash = "sha256:368da70808b36d73b4b390a8ffac11069f8a5c85f29eff1f1b01bcf3ef5b2a20"},
 ]

 [package.extras]
 docs = ["furo", "olefile", "sphinx (>=8.1)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinxext-opengraph"]
 fpx = ["olefile"]
 mic = ["olefile"]
-tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+tests = ["check-manifest", "coverage (>=7.4.2)", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout", "trove-classifiers (>=2024.10.12)"]
 typing = ["typing-extensions"]
 xmp = ["defusedxml"]

@@ -2990,6 +3132,20 @@ nodeenv = ">=0.11.1"
 pyyaml = ">=5.1"
 virtualenv = ">=20.10.0"

+[[package]]
+name = "prompt-toolkit"
+version = "3.0.48"
+description = "Library for building powerful interactive command lines in Python"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+    {file = "prompt_toolkit-3.0.48-py3-none-any.whl", hash = "sha256:f49a827f90062e411f1ce1f854f2aedb3c23353244f8108b89283587397ac10e"},
+    {file = "prompt_toolkit-3.0.48.tar.gz", hash = "sha256:d6623ab0477a80df74e646bdbc93621143f5caf104206aa29294d53de1a03d90"},
+]
+
+[package.dependencies]
+wcwidth = "*"
+
 [[package]]
 name = "propcache"
 version = "0.2.1"
@@ -3118,6 +3274,31 @@ files = [
     {file = "protobuf-4.25.5.tar.gz", hash = "sha256:7f8249476b4a9473645db7f8ab42b02fe1488cbe5fb72fddd445e0665afd8584"},
 ]

+[[package]]
+name = "ptyprocess"
+version = "0.7.0"
+description = "Run a subprocess in a pseudo terminal"
+optional = false
+python-versions = "*"
+files = [
+    {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"},
+    {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"},
+]
+
+[[package]]
+name = "pure-eval"
+version = "0.2.3"
+description = "Safely evaluate AST nodes without side effects"
+optional = false
+python-versions = "*"
+files = [
+    {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"},
+    {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"},
+]
+
+[package.extras]
+tests = ["pytest"]
+
 [[package]]
 name = "py"
 version = "1.11.0"
@@ -3131,53 +3312,53 @@ files = [

 [[package]]
 name = "pyarrow"
-version = "18.1.0"
+version = "19.0.0"
 description = "Python library for Apache Arrow"
 optional = false
 python-versions = ">=3.9"
 files = [
-    {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e21488d5cfd3d8b500b3238a6c4b075efabc18f0f6d80b29239737ebd69caa6c"},
-    {file = "pyarrow-18.1.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:b516dad76f258a702f7ca0250885fc93d1fa5ac13ad51258e39d402bd9e2e1e4"},
-    {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f443122c8e31f4c9199cb23dca29ab9427cef990f283f80fe15b8e124bcc49b"},
-    {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0a03da7f2758645d17b7b4f83c8bffeae5bbb7f974523fe901f36288d2eab71"},
-    {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ba17845efe3aa358ec266cf9cc2800fa73038211fb27968bfa88acd09261a470"},
-    {file = "pyarrow-18.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:3c35813c11a059056a22a3bef520461310f2f7eea5c8a11ef9de7062a23f8d56"},
-    {file = "pyarrow-18.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:9736ba3c85129d72aefa21b4f3bd715bc4190fe4426715abfff90481e7d00812"},
-    {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:eaeabf638408de2772ce3d7793b2668d4bb93807deed1725413b70e3156a7854"},
-    {file = "pyarrow-18.1.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:3b2e2239339c538f3464308fd345113f886ad031ef8266c6f004d49769bb074c"},
-    {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f39a2e0ed32a0970e4e46c262753417a60c43a3246972cfc2d3eb85aedd01b21"},
-    {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31e9417ba9c42627574bdbfeada7217ad8a4cbbe45b9d6bdd4b62abbca4c6f6"},
-    {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:01c034b576ce0eef554f7c3d8c341714954be9b3f5d5bc7117006b85fcf302fe"},
-    {file = "pyarrow-18.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:f266a2c0fc31995a06ebd30bcfdb7f615d7278035ec5b1cd71c48d56daaf30b0"},
-    {file = "pyarrow-18.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:d4f13eee18433f99adefaeb7e01d83b59f73360c231d4782d9ddfaf1c3fbde0a"},
-    {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:9f3a76670b263dc41d0ae877f09124ab96ce10e4e48f3e3e4257273cee61ad0d"},
-    {file = "pyarrow-18.1.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:da31fbca07c435be88a0c321402c4e31a2ba61593ec7473630769de8346b54ee"},
-    {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:543ad8459bc438efc46d29a759e1079436290bd583141384c6f7a1068ed6f992"},
-    {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0743e503c55be0fdb5c08e7d44853da27f19dc854531c0570f9f394ec9671d54"},
-    {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:d4b3d2a34780645bed6414e22dda55a92e0fcd1b8a637fba86800ad737057e33"},
-    {file = "pyarrow-18.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:c52f81aa6f6575058d8e2c782bf79d4f9fdc89887f16825ec3a66607a5dd8e30"},
-    {file = "pyarrow-18.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:0ad4892617e1a6c7a551cfc827e072a633eaff758fa09f21c4ee548c30bcaf99"},
-    {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:84e314d22231357d473eabec709d0ba285fa706a72377f9cc8e1cb3c8013813b"},
-    {file = "pyarrow-18.1.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:f591704ac05dfd0477bb8f8e0bd4b5dc52c1cadf50503858dce3a15db6e46ff2"},
-    {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:acb7564204d3c40babf93a05624fc6a8ec1ab1def295c363afc40b0c9e66c191"},
-    {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:74de649d1d2ccb778f7c3afff6085bd5092aed4c23df9feeb45dd6b16f3811aa"},
-    {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:f96bd502cb11abb08efea6dab09c003305161cb6c9eafd432e35e76e7fa9b90c"},
-    {file = "pyarrow-18.1.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:36ac22d7782554754a3b50201b607d553a8d71b78cdf03b33c1125be4b52397c"},
-    {file = "pyarrow-18.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:25dbacab8c5952df0ca6ca0af28f50d45bd31c1ff6fcf79e2d120b4a65ee7181"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:6a276190309aba7bc9d5bd2933230458b3521a4317acfefe69a354f2fe59f2bc"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:ad514dbfcffe30124ce655d72771ae070f30bf850b48bc4d9d3b25993ee0e386"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aebc13a11ed3032d8dd6e7171eb6e86d40d67a5639d96c35142bd568b9299324"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d6cf5c05f3cee251d80e98726b5c7cc9f21bab9e9783673bac58e6dfab57ecc8"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:11b676cd410cf162d3f6a70b43fb9e1e40affbc542a1e9ed3681895f2962d3d9"},
-    {file = "pyarrow-18.1.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:b76130d835261b38f14fc41fdfb39ad8d672afb84c447126b84d5472244cfaba"},
-    {file = "pyarrow-18.1.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:0b331e477e40f07238adc7ba7469c36b908f07c89b95dd4bd3a0ec84a3d1e21e"},
-    {file = "pyarrow-18.1.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:2c4dd0c9010a25ba03e198fe743b1cc03cd33c08190afff371749c52ccbbaf76"},
-    {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f97b31b4c4e21ff58c6f330235ff893cc81e23da081b1a4b1c982075e0ed4e9"},
-    {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a4813cb8ecf1809871fd2d64a8eff740a1bd3691bbe55f01a3cf6c5ec869754"},
-    {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:05a5636ec3eb5cc2a36c6edb534a38ef57b2ab127292a716d00eabb887835f1e"},
-    {file = "pyarrow-18.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:73eeed32e724ea3568bb06161cad5fa7751e45bc2228e33dcb10c614044165c7"},
-    {file = "pyarrow-18.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a1880dd6772b685e803011a6b43a230c23b566859a6e0c9a276c1e0faf4f4052"},
-    {file = "pyarrow-18.1.0.tar.gz", hash = "sha256:9386d3ca9c145b5539a1cfc75df07757dff870168c959b473a0bccbc3abc8c73"},
+    {file = "pyarrow-19.0.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:c318eda14f6627966997a7d8c374a87d084a94e4e38e9abbe97395c215830e0c"},
+    {file = "pyarrow-19.0.0-cp310-cp310-macosx_12_0_x86_64.whl", hash = "sha256:62ef8360ff256e960f57ce0299090fb86423afed5e46f18f1225f960e05aae3d"},
+    {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2795064647add0f16563e57e3d294dbfc067b723f0fd82ecd80af56dad15f503"},
+    {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a218670b26fb1bc74796458d97bcab072765f9b524f95b2fccad70158feb8b17"},
+    {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:66732e39eaa2247996a6b04c8aa33e3503d351831424cdf8d2e9a0582ac54b34"},
+    {file = "pyarrow-19.0.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:e675a3ad4732b92d72e4d24009707e923cab76b0d088e5054914f11a797ebe44"},
+    {file = "pyarrow-19.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:f094742275586cdd6b1a03655ccff3b24b2610c3af76f810356c4c71d24a2a6c"},
+    {file = "pyarrow-19.0.0-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:8e3a839bf36ec03b4315dc924d36dcde5444a50066f1c10f8290293c0427b46a"},
+    {file = "pyarrow-19.0.0-cp311-cp311-macosx_12_0_x86_64.whl", hash = "sha256:ce42275097512d9e4e4a39aade58ef2b3798a93aa3026566b7892177c266f735"},
+    {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9348a0137568c45601b031a8d118275069435f151cbb77e6a08a27e8125f59d4"},
+    {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a0144a712d990d60f7f42b7a31f0acaccf4c1e43e957f7b1ad58150d6f639c1"},
+    {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:2a1a109dfda558eb011e5f6385837daffd920d54ca00669f7a11132d0b1e6042"},
+    {file = "pyarrow-19.0.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:be686bf625aa7b9bada18defb3a3ea3981c1099697239788ff111d87f04cd263"},
+    {file = "pyarrow-19.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:239ca66d9a05844bdf5af128861af525e14df3c9591bcc05bac25918e650d3a2"},
+    {file = "pyarrow-19.0.0-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:a7bbe7109ab6198688b7079cbad5a8c22de4d47c4880d8e4847520a83b0d1b68"},
+    {file = "pyarrow-19.0.0-cp312-cp312-macosx_12_0_x86_64.whl", hash = "sha256:4624c89d6f777c580e8732c27bb8e77fd1433b89707f17c04af7635dd9638351"},
+    {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b6d3ce4288793350dc2d08d1e184fd70631ea22a4ff9ea5c4ff182130249d9b"},
+    {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:450a7d27e840e4d9a384b5c77199d489b401529e75a3b7a3799d4cd7957f2f9c"},
+    {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:a08e2a8a039a3f72afb67a6668180f09fddaa38fe0d21f13212b4aba4b5d2451"},
+    {file = "pyarrow-19.0.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:f43f5aef2a13d4d56adadae5720d1fed4c1356c993eda8b59dace4b5983843c1"},
+    {file = "pyarrow-19.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:2f672f5364b2d7829ef7c94be199bb88bf5661dd485e21d2d37de12ccb78a136"},
+    {file = "pyarrow-19.0.0-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:cf3bf0ce511b833f7bc5f5bb3127ba731e97222023a444b7359f3a22e2a3b463"},
+    {file = "pyarrow-19.0.0-cp313-cp313-macosx_12_0_x86_64.whl", hash = "sha256:4d8b0c0de0a73df1f1bf439af1b60f273d719d70648e898bc077547649bb8352"},
+    {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92aff08e23d281c69835e4a47b80569242a504095ef6a6223c1f6bb8883431d"},
+    {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3b78eff5968a1889a0f3bc81ca57e1e19b75f664d9c61a42a604bf9d8402aae"},
+    {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_28_aarch64.whl", hash = "sha256:b34d3bde38eba66190b215bae441646330f8e9da05c29e4b5dd3e41bde701098"},
+    {file = "pyarrow-19.0.0-cp313-cp313-manylinux_2_28_x86_64.whl", hash = "sha256:5418d4d0fab3a0ed497bad21d17a7973aad336d66ad4932a3f5f7480d4ca0c04"},
+    {file = "pyarrow-19.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:e82c3d5e44e969c217827b780ed8faf7ac4c53f934ae9238872e749fa531f7c9"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-macosx_12_0_arm64.whl", hash = "sha256:f208c3b58a6df3b239e0bb130e13bc7487ed14f39a9ff357b6415e3f6339b560"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-macosx_12_0_x86_64.whl", hash = "sha256:c751c1c93955b7a84c06794df46f1cec93e18610dcd5ab7d08e89a81df70a849"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b903afaa5df66d50fc38672ad095806443b05f202c792694f3a604ead7c6ea6e"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a22a4bc0937856263df8b94f2f2781b33dd7f876f787ed746608e06902d691a5"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:5e8a28b918e2e878c918f6d89137386c06fe577cd08d73a6be8dafb317dc2d73"},
+    {file = "pyarrow-19.0.0-cp313-cp313t-manylinux_2_28_x86_64.whl", hash = "sha256:29cd86c8001a94f768f79440bf83fee23963af5e7bc68ce3a7e5f120e17edf89"},
+    {file = "pyarrow-19.0.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:c0423393e4a07ff6fea08feb44153302dd261d0551cc3b538ea7a5dc853af43a"},
+    {file = "pyarrow-19.0.0-cp39-cp39-macosx_12_0_x86_64.whl", hash = "sha256:718947fb6d82409013a74b176bf93e0f49ef952d8a2ecd068fecd192a97885b7"},
+    {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c1c162c4660e0978411a4761f91113dde8da3433683efa473501254563dcbe8"},
+    {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c73268cf557e688efb60f1ccbc7376f7e18cd8e2acae9e663e98b194c40c1a2d"},
+    {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:edfe6d3916e915ada9acc4e48f6dafca7efdbad2e6283db6fd9385a1b23055f1"},
+    {file = "pyarrow-19.0.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:da410b70a7ab8eb524112f037a7a35da7128b33d484f7671a264a4c224ac131d"},
+    {file = "pyarrow-19.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:597360ffc71fc8cceea1aec1fb60cb510571a744fffc87db33d551d5de919bec"},
+    {file = "pyarrow-19.0.0.tar.gz", hash = "sha256:8d47c691765cf497aaeed4954d226568563f1b3b74ff61139f2d77876717084b"},
 ]

 [package.extras]
@@ -3221,18 +3402,18 @@ files = [

 [[package]]
 name = "pydantic"
-version = "2.10.3"
+version = "2.10.5"
 description = "Data validation using Python type hints"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic-2.10.3-py3-none-any.whl", hash = "sha256:be04d85bbc7b65651c5f8e6b9976ed9c6f41782a55524cef079a34a0bb82144d"},
-    {file = "pydantic-2.10.3.tar.gz", hash = "sha256:cb5ac360ce894ceacd69c403187900a02c4b20b693a9dd1d643e1effab9eadf9"},
+    {file = "pydantic-2.10.5-py3-none-any.whl", hash = "sha256:4dd4e322dbe55472cb7ca7e73f4b63574eecccf2835ffa2af9021ce113c83c53"},
+    {file = "pydantic-2.10.5.tar.gz", hash = "sha256:278b38dbbaec562011d659ee05f63346951b3a248a6f3642e1bc68894ea2b4ff"},
 ]

 [package.dependencies]
 annotated-types = ">=0.6.0"
-pydantic-core = "2.27.1"
+pydantic-core = "2.27.2"
 typing-extensions = ">=4.12.2"

 [package.extras]
@@ -3241,116 +3422,130 @@ timezone = ["tzdata"]

 [[package]]
 name = "pydantic-core"
-version = "2.27.1"
+version = "2.27.2"
 description = "Core functionality for Pydantic validation and serialization"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "pydantic_core-2.27.1-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:71a5e35c75c021aaf400ac048dacc855f000bdfed91614b4a726f7432f1f3d6a"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f82d068a2d6ecfc6e054726080af69a6764a10015467d7d7b9f66d6ed5afa23b"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:121ceb0e822f79163dd4699e4c54f5ad38b157084d97b34de8b232bcaad70278"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4603137322c18eaf2e06a4495f426aa8d8388940f3c457e7548145011bb68e05"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a33cd6ad9017bbeaa9ed78a2e0752c5e250eafb9534f308e7a5f7849b0b1bfb4"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:15cc53a3179ba0fcefe1e3ae50beb2784dede4003ad2dfd24f81bba4b23a454f"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45d9c5eb9273aa50999ad6adc6be5e0ecea7e09dbd0d31bd0c65a55a2592ca08"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8bf7b66ce12a2ac52d16f776b31d16d91033150266eb796967a7e4621707e4f6"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:655d7dd86f26cb15ce8a431036f66ce0318648f8853d709b4167786ec2fa4807"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:5556470f1a2157031e676f776c2bc20acd34c1990ca5f7e56f1ebf938b9ab57c"},
-    {file = "pydantic_core-2.27.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f69ed81ab24d5a3bd93861c8c4436f54afdf8e8cc421562b0c7504cf3be58206"},
-    {file = "pydantic_core-2.27.1-cp310-none-win32.whl", hash = "sha256:f5a823165e6d04ccea61a9f0576f345f8ce40ed533013580e087bd4d7442b52c"},
-    {file = "pydantic_core-2.27.1-cp310-none-win_amd64.whl", hash = "sha256:57866a76e0b3823e0b56692d1a0bf722bffb324839bb5b7226a7dbd6c9a40b17"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:ac3b20653bdbe160febbea8aa6c079d3df19310d50ac314911ed8cc4eb7f8cb8"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a5a8e19d7c707c4cadb8c18f5f60c843052ae83c20fa7d44f41594c644a1d330"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7f7059ca8d64fea7f238994c97d91f75965216bcbe5f695bb44f354893f11d52"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bed0f8a0eeea9fb72937ba118f9db0cb7e90773462af7962d382445f3005e5a4"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a3cb37038123447cf0f3ea4c74751f6a9d7afef0eb71aa07bf5f652b5e6a132c"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:84286494f6c5d05243456e04223d5a9417d7f443c3b76065e75001beb26f88de"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:acc07b2cfc5b835444b44a9956846b578d27beeacd4b52e45489e93276241025"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4fefee876e07a6e9aad7a8c8c9f85b0cdbe7df52b8a9552307b09050f7512c7e"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:258c57abf1188926c774a4c94dd29237e77eda19462e5bb901d88adcab6af919"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:35c14ac45fcfdf7167ca76cc80b2001205a8d5d16d80524e13508371fb8cdd9c"},
-    {file = "pydantic_core-2.27.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:d1b26e1dff225c31897696cab7d4f0a315d4c0d9e8666dbffdb28216f3b17fdc"},
-    {file = "pydantic_core-2.27.1-cp311-none-win32.whl", hash = "sha256:2cdf7d86886bc6982354862204ae3b2f7f96f21a3eb0ba5ca0ac42c7b38598b9"},
-    {file = "pydantic_core-2.27.1-cp311-none-win_amd64.whl", hash = "sha256:3af385b0cee8df3746c3f406f38bcbfdc9041b5c2d5ce3e5fc6637256e60bbc5"},
-    {file = "pydantic_core-2.27.1-cp311-none-win_arm64.whl", hash = "sha256:81f2ec23ddc1b476ff96563f2e8d723830b06dceae348ce02914a37cb4e74b89"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9cbd94fc661d2bab2bc702cddd2d3370bbdcc4cd0f8f57488a81bcce90c7a54f"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5f8c4718cd44ec1580e180cb739713ecda2bdee1341084c1467802a417fe0f02"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:15aae984e46de8d376df515f00450d1522077254ef6b7ce189b38ecee7c9677c"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1ba5e3963344ff25fc8c40da90f44b0afca8cfd89d12964feb79ac1411a260ac"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:992cea5f4f3b29d6b4f7f1726ed8ee46c8331c6b4eed6db5b40134c6fe1768bb"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0325336f348dbee6550d129b1627cb8f5351a9dc91aad141ffb96d4937bd9529"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7597c07fbd11515f654d6ece3d0e4e5093edc30a436c63142d9a4b8e22f19c35"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3bbd5d8cc692616d5ef6fbbbd50dbec142c7e6ad9beb66b78a96e9c16729b089"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:dc61505e73298a84a2f317255fcc72b710b72980f3a1f670447a21efc88f8381"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:e1f735dc43da318cad19b4173dd1ffce1d84aafd6c9b782b3abc04a0d5a6f5bb"},
-    {file = "pydantic_core-2.27.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f4e5658dbffe8843a0f12366a4c2d1c316dbe09bb4dfbdc9d2d9cd6031de8aae"},
-    {file = "pydantic_core-2.27.1-cp312-none-win32.whl", hash = "sha256:672ebbe820bb37988c4d136eca2652ee114992d5d41c7e4858cdd90ea94ffe5c"},
-    {file = "pydantic_core-2.27.1-cp312-none-win_amd64.whl", hash = "sha256:66ff044fd0bb1768688aecbe28b6190f6e799349221fb0de0e6f4048eca14c16"},
-    {file = "pydantic_core-2.27.1-cp312-none-win_arm64.whl", hash = "sha256:9a3b0793b1bbfd4146304e23d90045f2a9b5fd5823aa682665fbdaf2a6c28f3e"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f216dbce0e60e4d03e0c4353c7023b202d95cbaeff12e5fd2e82ea0a66905073"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a2e02889071850bbfd36b56fd6bc98945e23670773bc7a76657e90e6b6603c08"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42b0e23f119b2b456d07ca91b307ae167cc3f6c846a7b169fca5326e32fdc6cf"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:764be71193f87d460a03f1f7385a82e226639732214b402f9aa61f0d025f0737"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1c00666a3bd2f84920a4e94434f5974d7bbc57e461318d6bb34ce9cdbbc1f6b2"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3ccaa88b24eebc0f849ce0a4d09e8a408ec5a94afff395eb69baf868f5183107"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c65af9088ac534313e1963443d0ec360bb2b9cba6c2909478d22c2e363d98a51"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:206b5cf6f0c513baffaeae7bd817717140770c74528f3e4c3e1cec7871ddd61a"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:062f60e512fc7fff8b8a9d680ff0ddaaef0193dba9fa83e679c0c5f5fbd018bc"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:a0697803ed7d4af5e4c1adf1670af078f8fcab7a86350e969f454daf598c4960"},
-    {file = "pydantic_core-2.27.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:58ca98a950171f3151c603aeea9303ef6c235f692fe555e883591103da709b23"},
-    {file = "pydantic_core-2.27.1-cp313-none-win32.whl", hash = "sha256:8065914ff79f7eab1599bd80406681f0ad08f8e47c880f17b416c9f8f7a26d05"},
-    {file = "pydantic_core-2.27.1-cp313-none-win_amd64.whl", hash = "sha256:ba630d5e3db74c79300d9a5bdaaf6200172b107f263c98a0539eeecb857b2337"},
-    {file = "pydantic_core-2.27.1-cp313-none-win_arm64.whl", hash = "sha256:45cf8588c066860b623cd11c4ba687f8d7175d5f7ef65f7129df8a394c502de5"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:5897bec80a09b4084aee23f9b73a9477a46c3304ad1d2d07acca19723fb1de62"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:d0165ab2914379bd56908c02294ed8405c252250668ebcb438a55494c69f44ab"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b9af86e1d8e4cfc82c2022bfaa6f459381a50b94a29e95dcdda8442d6d83864"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5f6c8a66741c5f5447e047ab0ba7a1c61d1e95580d64bce852e3df1f895c4067"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a42d6a8156ff78981f8aa56eb6394114e0dedb217cf8b729f438f643608cbcd"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64c65f40b4cd8b0e049a8edde07e38b476da7e3aaebe63287c899d2cff253fa5"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdcf339322a3fae5cbd504edcefddd5a50d9ee00d968696846f089b4432cf78"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:bf99c8404f008750c846cb4ac4667b798a9f7de673ff719d705d9b2d6de49c5f"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8f1edcea27918d748c7e5e4d917297b2a0ab80cad10f86631e488b7cddf76a36"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:159cac0a3d096f79ab6a44d77a961917219707e2a130739c64d4dd46281f5c2a"},
-    {file = "pydantic_core-2.27.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:029d9757eb621cc6e1848fa0b0310310de7301057f623985698ed7ebb014391b"},
-    {file = "pydantic_core-2.27.1-cp38-none-win32.whl", hash = "sha256:a28af0695a45f7060e6f9b7092558a928a28553366519f64083c63a44f70e618"},
-    {file = "pydantic_core-2.27.1-cp38-none-win_amd64.whl", hash = "sha256:2d4567c850905d5eaaed2f7a404e61012a51caf288292e016360aa2b96ff38d4"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:e9386266798d64eeb19dd3677051f5705bf873e98e15897ddb7d76f477131967"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4228b5b646caa73f119b1ae756216b59cc6e2267201c27d3912b592c5e323b60"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b3dfe500de26c52abe0477dde16192ac39c98f05bf2d80e76102d394bd13854"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aee66be87825cdf72ac64cb03ad4c15ffef4143dbf5c113f64a5ff4f81477bf9"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b748c44bb9f53031c8cbc99a8a061bc181c1000c60a30f55393b6e9c45cc5bd"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ca038c7f6a0afd0b2448941b6ef9d5e1949e999f9e5517692eb6da58e9d44be"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e0bd57539da59a3e4671b90a502da9a28c72322a4f17866ba3ac63a82c4498e"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ac6c2c45c847bbf8f91930d88716a0fb924b51e0c6dad329b793d670ec5db792"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b94d4ba43739bbe8b0ce4262bcc3b7b9f31459ad120fb595627eaeb7f9b9ca01"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:00e6424f4b26fe82d44577b4c842d7df97c20be6439e8e685d0d715feceb9fb9"},
-    {file = "pydantic_core-2.27.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:38de0a70160dd97540335b7ad3a74571b24f1dc3ed33f815f0880682e6880131"},
-    {file = "pydantic_core-2.27.1-cp39-none-win32.whl", hash = "sha256:7ccebf51efc61634f6c2344da73e366c75e735960b5654b63d7e6f69a5885fa3"},
-    {file = "pydantic_core-2.27.1-cp39-none-win_amd64.whl", hash = "sha256:a57847b090d7892f123726202b7daa20df6694cbd583b67a592e856bff603d6c"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:3fa80ac2bd5856580e242dbc202db873c60a01b20309c8319b5c5986fbe53ce6"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:d950caa237bb1954f1b8c9227b5065ba6875ac9771bb8ec790d956a699b78676"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0e4216e64d203e39c62df627aa882f02a2438d18a5f21d7f721621f7a5d3611d"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02a3d637bd387c41d46b002f0e49c52642281edacd2740e5a42f7017feea3f2c"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:161c27ccce13b6b0c8689418da3885d3220ed2eae2ea5e9b2f7f3d48f1d52c27"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:19910754e4cc9c63bc1c7f6d73aa1cfee82f42007e407c0f413695c2f7ed777f"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:e173486019cc283dc9778315fa29a363579372fe67045e971e89b6365cc035ed"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:af52d26579b308921b73b956153066481f064875140ccd1dfd4e77db89dbb12f"},
-    {file = "pydantic_core-2.27.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:981fb88516bd1ae8b0cbbd2034678a39dedc98752f264ac9bc5839d3923fa04c"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5fde892e6c697ce3e30c61b239330fc5d569a71fefd4eb6512fc6caec9dd9e2f"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:816f5aa087094099fff7edabb5e01cc370eb21aa1a1d44fe2d2aefdfb5599b31"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c10c309e18e443ddb108f0ef64e8729363adbfd92d6d57beec680f6261556f3"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:98476c98b02c8e9b2eec76ac4156fd006628b1b2d0ef27e548ffa978393fd154"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c3027001c28434e7ca5a6e1e527487051136aa81803ac812be51802150d880dd"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:7699b1df36a48169cdebda7ab5a2bac265204003f153b4bd17276153d997670a"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:1c39b07d90be6b48968ddc8c19e7585052088fd7ec8d568bb31ff64c70ae3c97"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:46ccfe3032b3915586e469d4972973f893c0a2bb65669194a5bdea9bacc088c2"},
-    {file = "pydantic_core-2.27.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:62ba45e21cf6571d7f716d903b5b7b6d2617e2d5d67c0923dc47b9d41369f840"},
-    {file = "pydantic_core-2.27.1.tar.gz", hash = "sha256:62a763352879b84aa31058fc931884055fd75089cccbd9d58bb6afd01141b235"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4"},
+    {file = "pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc"},
+    {file = "pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9"},
+    {file = "pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee"},
+    {file = "pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b"},
+    {file = "pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e"},
+    {file = "pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9"},
+    {file = "pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2"},
+    {file = "pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35"},
+    {file = "pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39"},
 ]

 [package.dependencies]
 typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0"

+[[package]]
+name = "pygments"
+version = "2.19.1"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"},
+    {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"},
+]
+
+[package.extras]
+windows-terminal = ["colorama (>=0.4.6)"]
+
 [[package]]
 name = "pyjwt"
 version = "2.10.1"
@@ -3397,13 +3592,13 @@ torch = ["torch"]

 [[package]]
 name = "pyparsing"
-version = "3.2.0"
+version = "3.2.1"
 description = "pyparsing module - Classes and methods to define and execute parsing grammars"
 optional = false
 python-versions = ">=3.9"
 files = [
-    {file = "pyparsing-3.2.0-py3-none-any.whl", hash = "sha256:93d9577b88da0bbea8cc8334ee8b918ed014968fd2ec383e868fb8afb1ccef84"},
-    {file = "pyparsing-3.2.0.tar.gz", hash = "sha256:cbf74e27246d595d9a74b186b810f6fbb86726dbf3b9532efb343f6d7294fe9c"},
+    {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"},
+    {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"},
 ]

 [package.extras]
@@ -3487,6 +3682,22 @@ files = [
     {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
 ]

+[[package]]
+name = "pyvis"
+version = "0.3.2"
+description = "A Python network graph visualization library"
+optional = false
+python-versions = ">3.6"
+files = [
+    {file = "pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555"},
+]
+
+[package.dependencies]
+ipython = ">=5.3.0"
+jinja2 = ">=2.9.6"
+jsonpickle = ">=1.4.1"
+networkx = ">=1.11"
+
 [[package]]
 name = "pywin32"
 version = "308"
@@ -3745,13 +3956,13 @@ pyasn1 = ">=0.1.3"

 [[package]]
 name = "s3transfer"
-version = "0.10.4"
+version = "0.11.0"
 description = "An Amazon S3 Transfer Manager"
 optional = true
 python-versions = ">=3.8"
 files = [
-    {file = "s3transfer-0.10.4-py3-none-any.whl", hash = "sha256:244a76a24355363a68164241438de1b72f8781664920260c48465896b712a41e"},
-    {file = "s3transfer-0.10.4.tar.gz", hash = "sha256:29edc09801743c21eb5ecbc617a152df41d3c287f67b615f73e5f750583666a7"},
+    {file = "s3transfer-0.11.0-py3-none-any.whl", hash = "sha256:f43b03931c198743569bbfb6a328a53f4b2b4ec723cd7c01fab68e3119db3f8b"},
+    {file = "s3transfer-0.11.0.tar.gz", hash = "sha256:6563eda054c33bdebef7cbf309488634651c47270d828e594d151cd289fb7cf7"},
 ]

 [package.dependencies]
@@ -3773,23 +3984,23 @@ files = [

 [[package]]
 name = "setuptools"
-version = "75.6.0"
+version = "75.8.0"
 description = "Easily download, build, install, upgrade, and uninstall Python packages"
 optional = false
 python-versions = ">=3.9"
 files = [
-    {file = "setuptools-75.6.0-py3-none-any.whl", hash = "sha256:ce74b49e8f7110f9bf04883b730f4765b774ef3ef28f722cce7c273d253aaf7d"},
-    {file = "setuptools-75.6.0.tar.gz", hash = "sha256:8199222558df7c86216af4f84c30e9b34a61d8ba19366cc914424cdbd28252f6"},
+    {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"},
+    {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"},
 ]

 [package.extras]
-check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.7.0)"]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"]
 core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
 cover = ["pytest-cov"]
 doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
 enabler = ["pytest-enabler (>=2.2)"]
-test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
-type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (>=1.12,<1.14)", "pytest-mypy"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"]

 [[package]]
 name = "six"
@@ -3815,72 +4026,72 @@ files = [

 [[package]]
 name = "sqlalchemy"
-version = "2.0.36"
+version = "2.0.37"
 description = "Database Abstraction Library"
 optional = true
 python-versions = ">=3.7"
 files = [
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:59b8f3adb3971929a3e660337f5dacc5942c2cdb760afcabb2614ffbda9f9f72"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:37350015056a553e442ff672c2d20e6f4b6d0b2495691fa239d8aa18bb3bc908"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8318f4776c85abc3f40ab185e388bee7a6ea99e7fa3a30686580b209eaa35c08"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c245b1fbade9c35e5bd3b64270ab49ce990369018289ecfde3f9c318411aaa07"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:69f93723edbca7342624d09f6704e7126b152eaed3cdbb634cb657a54332a3c5"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:f9511d8dd4a6e9271d07d150fb2f81874a3c8c95e11ff9af3a2dfc35fe42ee44"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-win32.whl", hash = "sha256:c3f3631693003d8e585d4200730616b78fafd5a01ef8b698f6967da5c605b3fa"},
-    {file = "SQLAlchemy-2.0.36-cp310-cp310-win_amd64.whl", hash = "sha256:a86bfab2ef46d63300c0f06936bd6e6c0105faa11d509083ba8f2f9d237fb5b5"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:fd3a55deef00f689ce931d4d1b23fa9f04c880a48ee97af488fd215cf24e2a6c"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4f5e9cd989b45b73bd359f693b935364f7e1f79486e29015813c338450aa5a71"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0ddd9db6e59c44875211bc4c7953a9f6638b937b0a88ae6d09eb46cced54eff"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2519f3a5d0517fc159afab1015e54bb81b4406c278749779be57a569d8d1bb0d"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:59b1ee96617135f6e1d6f275bbe988f419c5178016f3d41d3c0abb0c819f75bb"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:39769a115f730d683b0eb7b694db9789267bcd027326cccc3125e862eb03bfd8"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-win32.whl", hash = "sha256:66bffbad8d6271bb1cc2f9a4ea4f86f80fe5e2e3e501a5ae2a3dc6a76e604e6f"},
-    {file = "SQLAlchemy-2.0.36-cp311-cp311-win_amd64.whl", hash = "sha256:23623166bfefe1487d81b698c423f8678e80df8b54614c2bf4b4cfcd7c711959"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f7b64e6ec3f02c35647be6b4851008b26cff592a95ecb13b6788a54ef80bbdd4"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46331b00096a6db1fdc052d55b101dbbfc99155a548e20a0e4a8e5e4d1362855"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fdf3386a801ea5aba17c6410dd1dc8d39cf454ca2565541b5ac42a84e1e28f53"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9dfa18ff2a67b09b372d5db8743c27966abf0e5344c555d86cc7199f7ad83a"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:90812a8933df713fdf748b355527e3af257a11e415b613dd794512461eb8a686"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1bc330d9d29c7f06f003ab10e1eaced295e87940405afe1b110f2eb93a233588"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-win32.whl", hash = "sha256:79d2e78abc26d871875b419e1fd3c0bca31a1cb0043277d0d850014599626c2e"},
-    {file = "SQLAlchemy-2.0.36-cp312-cp312-win_amd64.whl", hash = "sha256:b544ad1935a8541d177cb402948b94e871067656b3a0b9e91dbec136b06a2ff5"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b5cc79df7f4bc3d11e4b542596c03826063092611e481fcf1c9dfee3c94355ef"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:3c01117dd36800f2ecaa238c65365b7b16497adc1522bf84906e5710ee9ba0e8"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9bc633f4ee4b4c46e7adcb3a9b5ec083bf1d9a97c1d3854b92749d935de40b9b"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e46ed38affdfc95d2c958de328d037d87801cfcbea6d421000859e9789e61c2"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b2985c0b06e989c043f1dc09d4fe89e1616aadd35392aea2844f0458a989eacf"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:4a121d62ebe7d26fec9155f83f8be5189ef1405f5973ea4874a26fab9f1e262c"},
-    {file = "SQLAlchemy-2.0.36-cp313-cp313-win32.whl", hash = 
"sha256:0572f4bd6f94752167adfd7c1bed84f4b240ee6203a95e05d1e208d488d0d436"}, - {file = "SQLAlchemy-2.0.36-cp313-cp313-win_amd64.whl", hash = "sha256:8c78ac40bde930c60e0f78b3cd184c580f89456dd87fc08f9e3ee3ce8765ce88"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:be9812b766cad94a25bc63bec11f88c4ad3629a0cec1cd5d4ba48dc23860486b"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50aae840ebbd6cdd41af1c14590e5741665e5272d2fee999306673a1bb1fdb4d"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4557e1f11c5f653ebfdd924f3f9d5ebfc718283b0b9beebaa5dd6b77ec290971"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:07b441f7d03b9a66299ce7ccf3ef2900abc81c0db434f42a5694a37bd73870f2"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:28120ef39c92c2dd60f2721af9328479516844c6b550b077ca450c7d7dc68575"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win32.whl", hash = "sha256:b81ee3d84803fd42d0b154cb6892ae57ea6b7c55d8359a02379965706c7efe6c"}, - {file = "SQLAlchemy-2.0.36-cp37-cp37m-win_amd64.whl", hash = "sha256:f942a799516184c855e1a32fbc7b29d7e571b52612647866d4ec1c3242578fcb"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3d6718667da04294d7df1670d70eeddd414f313738d20a6f1d1f379e3139a545"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:72c28b84b174ce8af8504ca28ae9347d317f9dba3999e5981a3cd441f3712e24"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b11d0cfdd2b095e7b0686cf5fabeb9c67fae5b06d265d8180715b8cfa86522e3"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e32092c47011d113dc01ab3e1d3ce9f006a47223b18422c5c0d150af13a00687"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:6a440293d802d3011028e14e4226da1434b373cbaf4a4bbb63f845761a708346"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c54a1e53a0c308a8e8a7dffb59097bff7facda27c70c286f005327f21b2bd6b1"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win32.whl", hash = "sha256:1e0d612a17581b6616ff03c8e3d5eff7452f34655c901f75d62bd86449d9750e"}, - {file = "SQLAlchemy-2.0.36-cp38-cp38-win_amd64.whl", hash = "sha256:8958b10490125124463095bbdadda5aa22ec799f91958e410438ad6c97a7b793"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dc022184d3e5cacc9579e41805a681187650e170eb2fd70e28b86192a479dcaa"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b817d41d692bf286abc181f8af476c4fbef3fd05e798777492618378448ee689"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a4e46a888b54be23d03a89be510f24a7652fe6ff660787b96cd0e57a4ebcb46d"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c4ae3005ed83f5967f961fd091f2f8c5329161f69ce8480aa8168b2d7fe37f06"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:03e08af7a5f9386a43919eda9de33ffda16b44eb11f3b313e6822243770e9763"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:3dbb986bad3ed5ceaf090200eba750b5245150bd97d3e67343a3cfed06feecf7"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win32.whl", hash = 
"sha256:9fe53b404f24789b5ea9003fc25b9a3988feddebd7e7b369c8fac27ad6f52f28"}, - {file = "SQLAlchemy-2.0.36-cp39-cp39-win_amd64.whl", hash = "sha256:af148a33ff0349f53512a049c6406923e4e02bf2f26c5fb285f143faf4f0e46a"}, - {file = "SQLAlchemy-2.0.36-py3-none-any.whl", hash = "sha256:fddbe92b4760c6f5d48162aef14824add991aeda8ddadb3c31d56eb15ca69f8e"}, - {file = "sqlalchemy-2.0.36.tar.gz", hash = "sha256:7f2767680b6d2398aea7082e45a774b2b0767b5c8d8ffb9c8b683088ea9b29c5"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da36c3b0e891808a7542c5c89f224520b9a16c7f5e4d6a1156955605e54aef0e"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e7402ff96e2b073a98ef6d6142796426d705addd27b9d26c3b32dbaa06d7d069"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e6f5d254a22394847245f411a2956976401e84da4288aa70cbcd5190744062c1"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:41296bbcaa55ef5fdd32389a35c710133b097f7b2609d8218c0eabded43a1d84"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:bedee60385c1c0411378cbd4dc486362f5ee88deceea50002772912d798bb00f"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6c67415258f9f3c69867ec02fea1bf6508153709ecbd731a982442a590f2b7e4"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win32.whl", hash = "sha256:650dcb70739957a492ad8acff65d099a9586b9b8920e3507ca61ec3ce650bb72"}, + {file = "SQLAlchemy-2.0.37-cp310-cp310-win_amd64.whl", hash = "sha256:93d1543cd8359040c02b6614421c8e10cd7a788c40047dbc507ed46c29ae5636"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:78361be6dc9073ed17ab380985d1e45e48a642313ab68ab6afa2457354ff692c"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b661b49d0cb0ab311a189b31e25576b7ac3e20783beb1e1817d72d9d02508bf5"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d57bafbab289e147d064ffbd5cca2d7b1394b63417c0636cea1f2e93d16eb9e8"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fa2c0913f02341d25fb858e4fb2031e6b0813494cca1ba07d417674128ce11b"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9df21b8d9e5c136ea6cde1c50d2b1c29a2b5ff2b1d610165c23ff250e0704087"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:db18ff6b8c0f1917f8b20f8eca35c28bbccb9f83afa94743e03d40203ed83de9"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win32.whl", hash = "sha256:46954173612617a99a64aee103bcd3f078901b9a8dcfc6ae80cbf34ba23df989"}, + {file = "SQLAlchemy-2.0.37-cp311-cp311-win_amd64.whl", hash = "sha256:7b7e772dc4bc507fdec4ee20182f15bd60d2a84f1e087a8accf5b5b7a0dcf2ba"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:2952748ecd67ed3b56773c185e85fc084f6bdcdec10e5032a7c25a6bc7d682ef"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3151822aa1db0eb5afd65ccfafebe0ef5cda3a7701a279c8d0bf17781a793bb4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eaa8039b6d20137a4e02603aba37d12cd2dde7887500b8855356682fc33933f4"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:1cdba1f73b64530c47b27118b7053b8447e6d6f3c8104e3ac59f3d40c33aa9fd"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1b2690456528a87234a75d1a1644cdb330a6926f455403c8e4f6cad6921f9098"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:cf5ae8a9dcf657fd72144a7fd01f243236ea39e7344e579a121c4205aedf07bb"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win32.whl", hash = "sha256:ea308cec940905ba008291d93619d92edaf83232ec85fbd514dcb329f3192761"}, + {file = "SQLAlchemy-2.0.37-cp312-cp312-win_amd64.whl", hash = "sha256:635d8a21577341dfe4f7fa59ec394b346da12420b86624a69e466d446de16aff"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:8c4096727193762e72ce9437e2a86a110cf081241919ce3fab8e89c02f6b6658"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e4fb5ac86d8fe8151966814f6720996430462e633d225497566b3996966b9bdb"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e56a139bfe136a22c438478a86f8204c1eb5eed36f4e15c4224e4b9db01cb3e4"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f95fc8e3f34b5f6b3effb49d10ac97c569ec8e32f985612d9b25dd12d0d2e94"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c505edd429abdfe3643fa3b2e83efb3445a34a9dc49d5f692dd087be966020e0"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:12b0f1ec623cccf058cf21cb544f0e74656618165b083d78145cafde156ea7b6"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win32.whl", hash = "sha256:293f9ade06b2e68dd03cfb14d49202fac47b7bb94bffcff174568c951fbc7af2"}, + {file = "SQLAlchemy-2.0.37-cp313-cp313-win_amd64.whl", hash = "sha256:d70f53a0646cc418ca4853da57cf3ddddbccb8c98406791f24426f2dd77fd0e2"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:44f569d0b1eb82301b92b72085583277316e7367e038d97c3a1a899d9a05e342"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2eae3423e538c10d93ae3e87788c6a84658c3ed6db62e6a61bb9495b0ad16bb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dfff7be361048244c3aa0f60b5e63221c5e0f0e509f4e47b8910e22b57d10ae7"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:5bc3339db84c5fb9130ac0e2f20347ee77b5dd2596ba327ce0d399752f4fce39"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:84b9f23b0fa98a6a4b99d73989350a94e4a4ec476b9a7dfe9b79ba5939f5e80b"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win32.whl", hash = "sha256:51bc9cfef83e0ac84f86bf2b10eaccb27c5a3e66a1212bef676f5bee6ef33ebb"}, + {file = "SQLAlchemy-2.0.37-cp37-cp37m-win_amd64.whl", hash = "sha256:8e47f1af09444f87c67b4f1bb6231e12ba6d4d9f03050d7fc88df6d075231a49"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6b788f14c5bb91db7f468dcf76f8b64423660a05e57fe277d3f4fad7b9dcb7ce"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521ef85c04c33009166777c77e76c8a676e2d8528dc83a57836b63ca9c69dcd1"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:75311559f5c9881a9808eadbeb20ed8d8ba3f7225bef3afed2000c2a9f4d49b9"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:cce918ada64c956b62ca2c2af59b125767097ec1dca89650a6221e887521bfd7"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:9d087663b7e1feabea8c578d6887d59bb00388158e8bff3a76be11aa3f748ca2"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:cf95a60b36997dad99692314c4713f141b61c5b0b4cc5c3426faad570b31ca01"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win32.whl", hash = "sha256:d75ead7dd4d255068ea0f21492ee67937bd7c90964c8f3c2bea83c7b7f81b95f"}, + {file = "SQLAlchemy-2.0.37-cp38-cp38-win_amd64.whl", hash = "sha256:74bbd1d0a9bacf34266a7907d43260c8d65d31d691bb2356f41b17c2dca5b1d0"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:648ec5acf95ad59255452ef759054f2176849662af4521db6cb245263ae4aa33"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:35bd2df269de082065d4b23ae08502a47255832cc3f17619a5cea92ce478b02b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f581d365af9373a738c49e0c51e8b18e08d8a6b1b15cc556773bcd8a192fa8b"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:82df02816c14f8dc9f4d74aea4cb84a92f4b0620235daa76dde002409a3fbb5a"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94b564e38b344d3e67d2e224f0aec6ba09a77e4582ced41e7bfd0f757d926ec9"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:955a2a765aa1bd81aafa69ffda179d4fe3e2a3ad462a736ae5b6f387f78bfeb8"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-win32.whl", hash = "sha256:03f0528c53ca0b67094c4764523c1451ea15959bbf0a8a8a3096900014db0278"}, + {file = "SQLAlchemy-2.0.37-cp39-cp39-win_amd64.whl", hash = "sha256:4b12885dc85a2ab2b7d00995bac6d967bffa8594123b02ed21e8eb2205a7584b"}, + {file = "SQLAlchemy-2.0.37-py3-none-any.whl", hash = "sha256:a8998bf9f8658bd3839cbc44ddbe982955641863da0c1efe5b00c1ab4f5c16b1"}, + {file = "sqlalchemy-2.0.37.tar.gz", hash = "sha256:12b28d99a9c14eaf4055810df1001557176716de0167b91026e648e65229bffb"}, ] [package.dependencies] -greenlet = {version = "!=0.4.17", markers = "python_version < \"3.13\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} +greenlet = {version = "!=0.4.17", markers = "python_version < \"3.14\" and (platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\")"} typing-extensions = ">=4.6.0" [package.extras] @@ -3908,6 +4119,25 @@ postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] pymysql = ["pymysql"] sqlcipher = ["sqlcipher3_binary"] +[[package]] +name = "stack-data" +version = "0.6.3" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.3-py3-none-any.whl", hash = "sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, + {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", 
"pytest", "typeguard"] + [[package]] name = "sympy" version = "1.13.1" @@ -3977,47 +4207,42 @@ protobuf = ">=3.20" [[package]] name = "tiktoken" -version = "0.7.0" +version = "0.8.0" description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "tiktoken-0.7.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:485f3cc6aba7c6b6ce388ba634fbba656d9ee27f766216f45146beb4ac18b25f"}, - {file = "tiktoken-0.7.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e54be9a2cd2f6d6ffa3517b064983fb695c9a9d8aa7d574d1ef3c3f931a99225"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:79383a6e2c654c6040e5f8506f3750db9ddd71b550c724e673203b4f6b4b4590"}, - {file = "tiktoken-0.7.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d4511c52caacf3c4981d1ae2df85908bd31853f33d30b345c8b6830763f769c"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:13c94efacdd3de9aff824a788353aa5749c0faee1fbe3816df365ea450b82311"}, - {file = "tiktoken-0.7.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8e58c7eb29d2ab35a7a8929cbeea60216a4ccdf42efa8974d8e176d50c9a3df5"}, - {file = "tiktoken-0.7.0-cp310-cp310-win_amd64.whl", hash = "sha256:21a20c3bd1dd3e55b91c1331bf25f4af522c525e771691adbc9a69336fa7f702"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:10c7674f81e6e350fcbed7c09a65bca9356eaab27fb2dac65a1e440f2bcfe30f"}, - {file = "tiktoken-0.7.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:084cec29713bc9d4189a937f8a35dbdfa785bd1235a34c1124fe2323821ee93f"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:811229fde1652fedcca7c6dfe76724d0908775b353556d8a71ed74d866f73f7b"}, - {file = "tiktoken-0.7.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:86b6e7dc2e7ad1b3757e8a24597415bafcfb454cebf9a33a01f2e6ba2e663992"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:1063c5748be36344c7e18c7913c53e2cca116764c2080177e57d62c7ad4576d1"}, - {file = "tiktoken-0.7.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:20295d21419bfcca092644f7e2f2138ff947a6eb8cfc732c09cc7d76988d4a89"}, - {file = "tiktoken-0.7.0-cp311-cp311-win_amd64.whl", hash = "sha256:959d993749b083acc57a317cbc643fb85c014d055b2119b739487288f4e5d1cb"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:71c55d066388c55a9c00f61d2c456a6086673ab7dec22dd739c23f77195b1908"}, - {file = "tiktoken-0.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:09ed925bccaa8043e34c519fbb2f99110bd07c6fd67714793c21ac298e449410"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:03c6c40ff1db0f48a7b4d2dafeae73a5607aacb472fa11f125e7baf9dce73704"}, - {file = "tiktoken-0.7.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d20b5c6af30e621b4aca094ee61777a44118f52d886dbe4f02b70dfe05c15350"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d427614c3e074004efa2f2411e16c826f9df427d3c70a54725cae860f09e4bf4"}, - {file = "tiktoken-0.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c46d7af7b8c6987fac9b9f61041b452afe92eb087d29c9ce54951280f899a97"}, - {file = "tiktoken-0.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:0bc603c30b9e371e7c4c7935aba02af5994a909fc3c0fe66e7004070858d3f8f"}, - {file = 
"tiktoken-0.7.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2398fecd38c921bcd68418675a6d155fad5f5e14c2e92fcf5fe566fa5485a858"}, - {file = "tiktoken-0.7.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8f5f6afb52fb8a7ea1c811e435e4188f2bef81b5e0f7a8635cc79b0eef0193d6"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:861f9ee616766d736be4147abac500732b505bf7013cfaf019b85892637f235e"}, - {file = "tiktoken-0.7.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:54031f95c6939f6b78122c0aa03a93273a96365103793a22e1793ee86da31685"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:fffdcb319b614cf14f04d02a52e26b1d1ae14a570f90e9b55461a72672f7b13d"}, - {file = "tiktoken-0.7.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:c72baaeaefa03ff9ba9688624143c858d1f6b755bb85d456d59e529e17234769"}, - {file = "tiktoken-0.7.0-cp38-cp38-win_amd64.whl", hash = "sha256:131b8aeb043a8f112aad9f46011dced25d62629091e51d9dc1adbf4a1cc6aa98"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:cabc6dc77460df44ec5b879e68692c63551ae4fae7460dd4ff17181df75f1db7"}, - {file = "tiktoken-0.7.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8d57f29171255f74c0aeacd0651e29aa47dff6f070cb9f35ebc14c82278f3b25"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ee92776fdbb3efa02a83f968c19d4997a55c8e9ce7be821ceee04a1d1ee149c"}, - {file = "tiktoken-0.7.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e215292e99cb41fbc96988ef62ea63bb0ce1e15f2c147a61acc319f8b4cbe5bf"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:8a81bac94769cab437dd3ab0b8a4bc4e0f9cf6835bcaa88de71f39af1791727a"}, - {file = "tiktoken-0.7.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d6d73ea93e91d5ca771256dfc9d1d29f5a554b83821a1dc0891987636e0ae226"}, - {file = "tiktoken-0.7.0-cp39-cp39-win_amd64.whl", hash = "sha256:2bcb28ddf79ffa424f171dfeef9a4daff61a94c631ca6813f43967cb263b83b9"}, - {file = "tiktoken-0.7.0.tar.gz", hash = "sha256:1077266e949c24e0291f6c350433c6f0971365ece2b173a23bc3b9f9defef6b6"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b07e33283463089c81ef1467180e3e00ab00d46c2c4bbcef0acab5f771d6695e"}, + {file = "tiktoken-0.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9269348cb650726f44dd3bbb3f9110ac19a8dcc8f54949ad3ef652ca22a38e21"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e13f37bc4ef2d012731e93e0fef21dc3b7aea5bb9009618de9a4026844e560"}, + {file = "tiktoken-0.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f13d13c981511331eac0d01a59b5df7c0d4060a8be1e378672822213da51e0a2"}, + {file = "tiktoken-0.8.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:6b2ddbc79a22621ce8b1166afa9f9a888a664a579350dc7c09346a3b5de837d9"}, + {file = "tiktoken-0.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:d8c2d0e5ba6453a290b86cd65fc51fedf247e1ba170191715b049dac1f628005"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d622d8011e6d6f239297efa42a2657043aaed06c4f68833550cac9e9bc723ef1"}, + {file = "tiktoken-0.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2efaf6199717b4485031b4d6edb94075e4d79177a172f38dd934d911b588d54a"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:5637e425ce1fc49cf716d88df3092048359a4b3bbb7da762840426e937ada06d"}, + {file = "tiktoken-0.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb0e352d1dbe15aba082883058b3cce9e48d33101bdaac1eccf66424feb5b47"}, + {file = "tiktoken-0.8.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:56edfefe896c8f10aba372ab5706b9e3558e78db39dd497c940b47bf228bc419"}, + {file = "tiktoken-0.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:326624128590def898775b722ccc327e90b073714227175ea8febbc920ac0a99"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:881839cfeae051b3628d9823b2e56b5cc93a9e2efb435f4cf15f17dc45f21586"}, + {file = "tiktoken-0.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:fe9399bdc3f29d428f16a2f86c3c8ec20be3eac5f53693ce4980371c3245729b"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9a58deb7075d5b69237a3ff4bb51a726670419db6ea62bdcd8bd80c78497d7ab"}, + {file = "tiktoken-0.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2908c0d043a7d03ebd80347266b0e58440bdef5564f84f4d29fb235b5df3b04"}, + {file = "tiktoken-0.8.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:294440d21a2a51e12d4238e68a5972095534fe9878be57d905c476017bff99fc"}, + {file = "tiktoken-0.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:d8f3192733ac4d77977432947d563d7e1b310b96497acd3c196c9bddb36ed9db"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:02be1666096aff7da6cbd7cdaa8e7917bfed3467cd64b38b1f112e96d3b06a24"}, + {file = "tiktoken-0.8.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:c94ff53c5c74b535b2cbf431d907fc13c678bbd009ee633a2aca269a04389f9a"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b231f5e8982c245ee3065cd84a4712d64692348bc609d84467c57b4b72dcbc5"}, + {file = "tiktoken-0.8.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4177faa809bd55f699e88c96d9bb4635d22e3f59d635ba6fd9ffedf7150b9953"}, + {file = "tiktoken-0.8.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:5376b6f8dc4753cd81ead935c5f518fa0fbe7e133d9e25f648d8c4dabdd4bad7"}, + {file = "tiktoken-0.8.0-cp313-cp313-win_amd64.whl", hash = "sha256:18228d624807d66c87acd8f25fc135665617cab220671eb65b50f5d70fa51f69"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7e17807445f0cf1f25771c9d86496bd8b5c376f7419912519699f3cc4dc5c12e"}, + {file = "tiktoken-0.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:886f80bd339578bbdba6ed6d0567a0d5c6cfe198d9e587ba6c447654c65b8edc"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6adc8323016d7758d6de7313527f755b0fc6c72985b7d9291be5d96d73ecd1e1"}, + {file = "tiktoken-0.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b591fb2b30d6a72121a80be24ec7a0e9eb51c5500ddc7e4c2496516dd5e3816b"}, + {file = "tiktoken-0.8.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:845287b9798e476b4d762c3ebda5102be87ca26e5d2c9854002825d60cdb815d"}, + {file = "tiktoken-0.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:1473cfe584252dc3fa62adceb5b1c763c1874e04511b197da4e6de51d6ce5a02"}, + {file = "tiktoken-0.8.0.tar.gz", hash = "sha256:9ccbb2740f24542534369c5635cfd9b2b3c2490754a78ac8831d99f89f94eeb2"}, ] [package.dependencies] @@ -4173,6 +4398,21 @@ notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] +[[package]] +name = "traitlets" 
+version = "5.14.3" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.8" +files = [ + {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, + {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0,<8.2)", "pytest-mock", "pytest-mypy-testing"] + [[package]] name = "triton" version = "3.1.0" @@ -4197,13 +4437,13 @@ tutorials = ["matplotlib", "pandas", "tabulate"] [[package]] name = "types-pyyaml" -version = "6.0.12.20240917" +version = "6.0.12.20241230" description = "Typing stubs for PyYAML" optional = false python-versions = ">=3.8" files = [ - {file = "types-PyYAML-6.0.12.20240917.tar.gz", hash = "sha256:d1405a86f9576682234ef83bcb4e6fff7c9305c8b1fbad5e0bcd4f7dbdc9c587"}, - {file = "types_PyYAML-6.0.12.20240917-py3-none-any.whl", hash = "sha256:392b267f1c0fe6022952462bf5d6523f31e37f6cea49b14cee7ad634b6301570"}, + {file = "types_PyYAML-6.0.12.20241230-py3-none-any.whl", hash = "sha256:fa4d32565219b68e6dee5f67534c722e53c00d1cfc09c435ef04d7353e1e96e6"}, + {file = "types_pyyaml-6.0.12.20241230.tar.gz", hash = "sha256:7f07622dbd34bb9c8b264fe860a17e0efcad00d50b5f27e93984909d9363498c"}, ] [[package]] @@ -4236,13 +4476,13 @@ urllib3 = ">=2" [[package]] name = "types-tqdm" -version = "4.67.0.20241119" +version = "4.67.0.20241221" description = "Typing stubs for tqdm" optional = false python-versions = ">=3.8" files = [ - {file = "types-tqdm-4.67.0.20241119.tar.gz", hash = "sha256:1769e0e94d5e6d8fa814965f9cf3d9928376dd15dabcbcb784bb8769081092b4"}, - {file = "types_tqdm-4.67.0.20241119-py3-none-any.whl", hash = "sha256:a18d4eb62db0d35c52707ae13d821b5a57970755273ecb56e133ccc0ac7e7c79"}, + {file = "types_tqdm-4.67.0.20241221-py3-none-any.whl", hash = "sha256:a1f1c9cda5c2d8482d2c73957a5398bfdedda10f6bc7b3b4e812d5c910486d29"}, + {file = "types_tqdm-4.67.0.20241221.tar.gz", hash = "sha256:e56046631056922385abe89aeb18af5611f471eadd7918a0ad7f34d84cd4c8cc"}, ] [package.dependencies] @@ -4310,13 +4550,13 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] [[package]] name = "urllib3" -version = "2.2.3" +version = "2.3.0" description = "HTTP library with thread-safe connection pooling, file post, and more." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "urllib3-2.2.3-py3-none-any.whl", hash = "sha256:ca899ca043dcb1bafa3e262d73aa25c465bfb49e0bd9dd5d59f1d0acba2f8fac"}, - {file = "urllib3-2.2.3.tar.gz", hash = "sha256:e7d814a81dad81e6caf2ec9fdedb284ecc9c73076b62654547cc64ccdcae26e9"}, + {file = "urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df"}, + {file = "urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d"}, ] [package.extras] @@ -4327,13 +4567,13 @@ zstd = ["zstandard (>=0.18.0)"] [[package]] name = "virtualenv" -version = "20.28.0" +version = "20.29.0" description = "Virtual Python Environment builder" optional = false python-versions = ">=3.8" files = [ - {file = "virtualenv-20.28.0-py3-none-any.whl", hash = "sha256:23eae1b4516ecd610481eda647f3a7c09aea295055337331bb4e6892ecce47b0"}, - {file = "virtualenv-20.28.0.tar.gz", hash = "sha256:2c9c3262bb8e7b87ea801d715fae4495e6032450c71d2309be9550e7364049aa"}, + {file = "virtualenv-20.29.0-py3-none-any.whl", hash = "sha256:c12311863497992dc4b8644f8ea82d3b35bb7ef8ee82e6630d76d0197c39baf9"}, + {file = "virtualenv-20.29.0.tar.gz", hash = "sha256:6345e1ff19d4b1296954cee076baaf58ff2a12a84a338c62b02eda39f20aa982"}, ] [package.dependencies] @@ -4345,6 +4585,17 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +[[package]] +name = "wcwidth" +version = "0.2.13" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, + {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, +] + [[package]] name = "werkzeug" version = "3.1.3" @@ -4628,4 +4879,4 @@ torch = ["torch"] [metadata] lock-version = "2.0" python-versions = ">=3.9, <4.0" -content-hash = "a32560a472d4f6230349b9c13273f2462d0bdd560600f051f220a4cb41eeed7f" +content-hash = "0a2347ff8b273139d7f2e32a1dc4da7596fb1d657bbb8b4b52aab42c57bbaad1" diff --git a/adalflow/pyproject.toml b/adalflow/pyproject.toml index bf6c9c488..5c8340a2c 100644 --- a/adalflow/pyproject.toml +++ b/adalflow/pyproject.toml @@ -1,14 +1,14 @@ [tool.poetry] name = "adalflow" -version = "0.2.6" +version = "0.2.7" description = "The Library to Build and Auto-optimize LLM Applications" authors = ["Li Yin "] readme = "README.md" repository = "https://github.com/SylphAI-Inc/AdalFlow" - license = "MIT" -maintainers = ["Li Yin "] + +maintainers = ["Li Yin ", "Filip Makraduli "] classifiers = [ "Topic :: Software Development :: Build Tools", "Topic :: Software Development :: Libraries :: Python Modules", @@ -36,35 +36,30 @@ python-dotenv = "^1.0.1" backoff = "^2.2.1" jinja2 = "^3.1.3" jsonlines = "^4.0.0" -tiktoken = "^0.7.0" -numpy = "^1.26.4" +tiktoken = ">=0.3.3" +numpy = "*" tqdm = "^4.66.4" -pyyaml = "^6.0.1" +PyYAML = ">=6.0.1" nest-asyncio = "^1.6.0" colorama 
= "^0.4.6" -botocore = "^1.34.149" # if not specified, it will fail to resolve the dependency, we need to figure out a way to get rid of botocore dependency diskcache = "^5.6.3" # Optional dependencies -openai = { version = "^1.12.0", optional = true } -groq = { version = "^0.9.0", optional = true } -faiss-cpu = { version = "^1.8.0", optional = true } -sqlalchemy = { version = "^2.0.30", optional = true } -pgvector = { version = "^0.3.1", optional = true } -torch = { version = "^2.3.1", optional = true } -anthropic = { version = "^0.31.1", optional = true } -google-generativeai = { version = "^0.7.2", optional = true } -cohere = { version = "^5.5.8", optional = true } -ollama = { version = "^0.2.1", optional = true } -lancedb = { version = "^0.5.2", optional = true } - - +openai = { version = ">=1.12.0", optional = true } +groq = { version = ">=0.9.0", optional = true } +faiss-cpu = { version = ">=1.8.0", optional = true } +sqlalchemy = { version = ">=2.0.30", optional = true } +pgvector = { version = ">=0.3.1", optional = true } +torch = { version = ">=2.3.1", optional = true } +anthropic = { version = ">=0.31.1", optional = true } +google-generativeai = { version = ">=0.7.2", optional = true } +cohere = { version = ">=5.5.8", optional = true } +ollama = { version = ">=0.2.1", optional = true } +lancedb = { version = ">=0.5.2", optional = true } # Azure dependencies -azure-core = { version = "^1.24.0", optional = true } -azure-identity = { version = "^1.12.0", optional = true } -# azure-ai-formrecognizer = { version = "^3.2.0", optional = true } -# azure-ai-textanalytics = { version = "^5.3.0", optional = true } +azure-core = { version = ">=1.24.0", optional = true } +azure-identity = { version = ">=1.12.0", optional = true } # amazon bedrock boto3 = { version = "^1.35.19", optional = true } @@ -80,7 +75,11 @@ groq = "^0.9.0" google-generativeai = "^0.7.2" anthropic = "^0.31.1" lancedb = "^0.5.2" - +# TODO: cant make qdrant work here +# qdrant_client = [ +# { version = ">=1.12.2,<2.0.0", optional = true, markers = "python_version >= '3.10'" }, +# { version = ">=1.8.0,<1.12.2", optional = true, markers = "python_version < '3.10'" }, +# ] [tool.poetry.group.typing.dependencies] @@ -96,23 +95,21 @@ tensorboardx = "^2.6.2.2" matplotlib = "^3.9.1" azure-identity = "^1.18.0" azure-core = "^1.31.0" +pyvis = "^0.3.2" [tool.poetry.group.extra.dependencies] datasets = "^2.21.0" [tool.poetry.extras] # allow pip install adalflow[openai, groq] + +# model providers openai = ["openai"] groq = ["groq"] anthropic = ["anthropic"] cohere = ["cohere"] google-generativeai = ["google-generativeai"] -pgvector = ["pgvector"] -faiss-cpu = ["faiss-cpu"] -sqlalchemy = ["sqlalchemy"] -torch = ["torch"] ollama = ["ollama"] -lancedb = ["lancedb"] azure = [ "azure-core", "azure-identity", @@ -121,12 +118,30 @@ azure = [ ] bedrock = ["boto3"] + +# vector dbs +lancedb = ["lancedb"] +pgvector = ["pgvector"] +# qdrant = ["qdrant-client"] + datasets = ["datasets"] +# similarity search local +faiss-cpu = ["faiss-cpu"] +torch = ["torch"] + +# data modeling +sqlalchemy = ["sqlalchemy"] + + [[tool.poetry.source]] name = "nvidia-pypi" priority = "supplemental" url = "https://pypi.nvidia.com" +# [[tool.poetry.source]] +# name = "nvidia-pypi" +# priority = "supplemental" +# url = "https://pypi.nvidia.com" [build-system] diff --git a/adalflow/tests/test_componentlist.py b/adalflow/tests/test_componentlist.py new file mode 100644 index 000000000..ee0bef56e --- /dev/null +++ b/adalflow/tests/test_componentlist.py @@ -0,0 +1,127 @@ 
+import unittest + +# Assuming `Component` and `ComponentList` are defined in a module named `adalflow.core` +from adalflow.core import Component, ComponentList + + +class MockComponent(Component): + """A mock component used for testing purposes.""" + + def __init__(self, value): + super().__init__() + self.value = value + + def __repr__(self): + return f"MockComponent({self.value})" + + +class TestComponentList(unittest.TestCase): + def setUp(self): + """Create some mock components for testing.""" + self.c1 = MockComponent(1) + self.c2 = MockComponent(2) + self.c3 = MockComponent(3) + + def test_initialization(self): + """Test that ComponentList initializes correctly with components.""" + cl = ComponentList([self.c1, self.c2]) + self.assertEqual(len(cl), 2) + self.assertIs(cl[0], self.c1) + self.assertIs(cl[1], self.c2) + + def test_append(self): + """Test appending a new component to the list.""" + cl = ComponentList([self.c1]) + cl.append(self.c2) + self.assertEqual(len(cl), 2) + self.assertIs(cl[1], self.c2) + + def test_extend(self): + """Test extending the list with multiple components.""" + cl = ComponentList([self.c1]) + cl.extend([self.c2, self.c3]) + self.assertEqual(len(cl), 3) + self.assertIs(cl[1], self.c2) + self.assertIs(cl[2], self.c3) + + def test_indexing(self): + """Test retrieving components by index.""" + cl = ComponentList([self.c1, self.c2, self.c3]) + self.assertIs(cl[0], self.c1) + self.assertIs(cl[2], self.c3) + + def test_slicing(self): + """Test slicing the list of components.""" + cl = ComponentList([self.c1, self.c2, self.c3]) + sliced = cl[1:] + self.assertEqual(len(sliced), 2) + self.assertIs(sliced[0], self.c2) + self.assertIs(sliced[1], self.c3) + + def test_insert(self): + """Test inserting a component at a specific index.""" + cl = ComponentList([self.c1, self.c3]) + cl.insert(1, self.c2) + self.assertEqual(len(cl), 3) + self.assertIs(cl[1], self.c2) + + def test_pop(self): + """Test removing and returning a component.""" + cl = ComponentList([self.c1, self.c2, self.c3]) + component = cl.pop(1) + self.assertIs(component, self.c2) + self.assertEqual(len(cl), 2) + + def test_delitem(self): + """Test deleting components by index and slice.""" + cl = ComponentList([self.c1, self.c2, self.c3]) + del cl[1] + self.assertEqual(len(cl), 2) + self.assertIs(cl[0], self.c1) + self.assertIs(cl[1], self.c3) + + cl = ComponentList([self.c1, self.c2, self.c3]) + del cl[1:] + self.assertEqual(len(cl), 1) + self.assertIs(cl[0], self.c1) + + def test_add(self): + """Test adding two ComponentLists.""" + cl1 = ComponentList([self.c1]) + cl2 = ComponentList([self.c2, self.c3]) + cl3 = cl1 + cl2 + self.assertEqual(len(cl3), 3) + self.assertIs(cl3[0], self.c1) + self.assertIs(cl3[1], self.c2) + self.assertIs(cl3[2], self.c3) + + def test_iadd(self): + """Test adding components using the += operator.""" + cl = ComponentList([self.c1]) + cl += [self.c2, self.c3] + self.assertEqual(len(cl), 3) + self.assertIs(cl[1], self.c2) + self.assertIs(cl[2], self.c3) + + def test_repr(self): + """Test the custom __repr__ implementation.""" + cl = ComponentList([MockComponent(1), MockComponent(1), MockComponent(2)]) + expected = ( + "ComponentList(\n (0-1): 2 x MockComponent(1)\n (2): MockComponent(2)\n)" + ) + self.assertEqual(repr(cl), expected) + + def test_len(self): + """Test the length of the ComponentList.""" + cl = ComponentList([self.c1, self.c2]) + self.assertEqual(len(cl), 2) + + def test_iter(self): + """Test iterating over the components.""" + cl = ComponentList([self.c1, 
self.c2, self.c3]) + components = list(iter(cl)) + self.assertEqual(components, [self.c1, self.c2, self.c3]) + + +if __name__ == "__main__": + unittest.main() diff --git a/adalflow/tests/test_openai_client.py b/adalflow/tests/test_openai_client.py index 2bfe2fd92..823f3ec60 100644 --- a/adalflow/tests/test_openai_client.py +++ b/adalflow/tests/test_openai_client.py @@ -1,7 +1,9 @@ import unittest from unittest.mock import patch, AsyncMock, Mock +import os +import base64 -from openai.types import CompletionUsage +from openai.types import CompletionUsage, Image from openai.types.chat import ChatCompletion from adalflow.core.types import ModelType, GeneratorOutput @@ -21,7 +23,7 @@ def setUp(self): "id": "cmpl-3Q8Z5J9Z1Z5z5", "created": 1635820005, "object": "chat.completion", - "model": "gpt-3.5-turbo", + "model": "gpt-4o", "choices": [ { "message": { @@ -37,10 +39,162 @@ def setUp(self): ), } self.mock_response = ChatCompletion(**self.mock_response) + self.mock_vision_response = { + "id": "cmpl-4Q8Z5J9Z1Z5z5", + "created": 1635820005, + "object": "chat.completion", + "model": "gpt-4o", + "choices": [ + { + "message": { + "content": "The image shows a beautiful sunset over mountains.", + "role": "assistant", + }, + "index": 0, + "finish_reason": "stop", + } + ], + "usage": CompletionUsage( + completion_tokens=15, prompt_tokens=25, total_tokens=40 + ), + } + self.mock_vision_response = ChatCompletion(**self.mock_vision_response) + self.mock_image_response = [ + Image( + url="https://example.com/generated_image.jpg", + b64_json=None, + revised_prompt="A white siamese cat sitting elegantly", + model="dall-e-3", + ) + ] self.api_kwargs = { "messages": [{"role": "user", "content": "Hello"}], - "model": "gpt-3.5-turbo", + "model": "gpt-4o", + } + self.vision_api_kwargs = { + "messages": [ + { + "role": "user", + "content": [ + {"type": "text", "text": "Describe this image"}, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image.jpg", + "detail": "auto", + }, + }, + ], + } + ], + "model": "gpt-4o", + } + self.image_generation_kwargs = { + "model": "dall-e-3", + "prompt": "a white siamese cat", + "size": "1024x1024", + "quality": "standard", + "n": 1, + } + + def test_encode_image(self): + # Create a temporary test image file + test_image_path = "test_image.jpg" + test_content = b"fake image content" + try: + with open(test_image_path, "wb") as f: + f.write(test_content) + + # Test successful encoding + encoded = self.client._encode_image(test_image_path) + self.assertEqual(encoded, base64.b64encode(test_content).decode("utf-8")) + + # Test file not found + with self.assertRaises(ValueError) as context: + self.client._encode_image("nonexistent.jpg") + self.assertIn("Image file not found", str(context.exception)) + + finally: + # Cleanup + if os.path.exists(test_image_path): + os.remove(test_image_path) + + def test_prepare_image_content(self): + # Test URL image + url = "https://example.com/image.jpg" + result = self.client._prepare_image_content(url) + self.assertEqual( + result, + {"type": "image_url", "image_url": {"url": url, "detail": "auto"}}, + ) + + # Test with custom detail level + result = self.client._prepare_image_content(url, detail="high") + self.assertEqual( + result, + {"type": "image_url", "image_url": {"url": url, "detail": "high"}}, + ) + + # Test with pre-formatted content + pre_formatted = { + "type": "image_url", + "image_url": {"url": url, "detail": "low"}, + } + result = self.client._prepare_image_content(pre_formatted) + self.assertEqual(result, 
pre_formatted) + + def test_convert_inputs_to_api_kwargs_with_images(self): + # Test with single image URL + model_kwargs = { + "model": "gpt-4o", + "images": "https://example.com/image.jpg", + } + result = self.client.convert_inputs_to_api_kwargs( + input="Describe this image", + model_kwargs=model_kwargs, + model_type=ModelType.LLM, + ) + expected_content = [ + {"type": "text", "text": "Describe this image"}, + { + "type": "image_url", + "image_url": {"url": "https://example.com/image.jpg", "detail": "auto"}, + }, + ] + self.assertEqual(result["messages"][0]["content"], expected_content) + + # Test with multiple images + model_kwargs = { + "model": "gpt-4o", + "images": [ + "https://example.com/image1.jpg", + "https://example.com/image2.jpg", + ], + "detail": "high", } + result = self.client.convert_inputs_to_api_kwargs( + input="Compare these images", + model_kwargs=model_kwargs, + model_type=ModelType.LLM, + ) + expected_content = [ + {"type": "text", "text": "Compare these images"}, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image1.jpg", + "detail": "high", + }, + }, + { + "type": "image_url", + "image_url": { + "url": "https://example.com/image2.jpg", + "detail": "high", + }, + }, + ] + self.assertEqual(result["messages"][0]["content"], expected_content) @patch("adalflow.components.model_client.openai_client.AsyncOpenAI") async def test_acall_llm(self, MockAsyncOpenAI): @@ -98,6 +252,171 @@ def test_call(self, MockSyncOpenAI, mock_init_sync_client): self.assertEqual(output.usage.prompt_tokens, 20) self.assertEqual(output.usage.total_tokens, 30) + @patch("adalflow.components.model_client.openai_client.AsyncOpenAI") + async def test_acall_llm_with_vision(self, MockAsyncOpenAI): + mock_async_client = AsyncMock() + MockAsyncOpenAI.return_value = mock_async_client + + # Mock the vision model response + mock_async_client.chat.completions.create = AsyncMock( + return_value=self.mock_vision_response + ) + + # Call the _acall method with vision model + result = await self.client.acall( + api_kwargs=self.vision_api_kwargs, model_type=ModelType.LLM + ) + + # Assertions + MockAsyncOpenAI.assert_called_once() + mock_async_client.chat.completions.create.assert_awaited_once_with( + **self.vision_api_kwargs + ) + self.assertEqual(result, self.mock_vision_response) + + @patch( + "adalflow.components.model_client.openai_client.OpenAIClient.init_sync_client" + ) + @patch("adalflow.components.model_client.openai_client.OpenAI") + def test_call_with_vision(self, MockSyncOpenAI, mock_init_sync_client): + mock_sync_client = Mock() + MockSyncOpenAI.return_value = mock_sync_client + mock_init_sync_client.return_value = mock_sync_client + + # Mock the vision model response + mock_sync_client.chat.completions.create = Mock( + return_value=self.mock_vision_response + ) + + # Set the sync client + self.client.sync_client = mock_sync_client + + # Call the call method with vision model + result = self.client.call( + api_kwargs=self.vision_api_kwargs, model_type=ModelType.LLM + ) + + # Assertions + mock_sync_client.chat.completions.create.assert_called_once_with( + **self.vision_api_kwargs + ) + self.assertEqual(result, self.mock_vision_response) + + # Test parse_chat_completion for vision model + output = self.client.parse_chat_completion(completion=self.mock_vision_response) + self.assertTrue(isinstance(output, GeneratorOutput)) + self.assertEqual( + output.raw_response, "The image shows a beautiful sunset over mountains." 
+ ) + self.assertEqual(output.usage.completion_tokens, 15) + self.assertEqual(output.usage.prompt_tokens, 25) + self.assertEqual(output.usage.total_tokens, 40) + + def test_convert_inputs_to_api_kwargs_for_image_generation(self): + # Test basic image generation + result = self.client.convert_inputs_to_api_kwargs( + input="a white siamese cat", + model_kwargs={"model": "dall-e-3"}, + model_type=ModelType.IMAGE_GENERATION, + ) + self.assertEqual(result["prompt"], "a white siamese cat") + self.assertEqual(result["model"], "dall-e-3") + self.assertEqual(result["size"], "1024x1024") # default + self.assertEqual(result["quality"], "standard") # default + self.assertEqual(result["n"], 1) # default + + # Test image edit + test_image = "test_image.jpg" + test_mask = "test_mask.jpg" + try: + # Create test files + with open(test_image, "wb") as f: + f.write(b"fake image content") + with open(test_mask, "wb") as f: + f.write(b"fake mask content") + + result = self.client.convert_inputs_to_api_kwargs( + input="a white siamese cat", + model_kwargs={ + "model": "dall-e-2", + "image": test_image, + "mask": test_mask, + }, + model_type=ModelType.IMAGE_GENERATION, + ) + self.assertEqual(result["prompt"], "a white siamese cat") + self.assertEqual(result["model"], "dall-e-2") + self.assertTrue(isinstance(result["image"], str)) # base64 encoded + self.assertTrue(isinstance(result["mask"], str)) # base64 encoded + finally: + # Cleanup + if os.path.exists(test_image): + os.remove(test_image) + if os.path.exists(test_mask): + os.remove(test_mask) + + @patch("adalflow.components.model_client.openai_client.AsyncOpenAI") + async def test_acall_image_generation(self, MockAsyncOpenAI): + mock_async_client = AsyncMock() + MockAsyncOpenAI.return_value = mock_async_client + + # Mock the image generation response + mock_async_client.images.generate = AsyncMock( + return_value=type("Response", (), {"data": self.mock_image_response})() + ) + + # Call the acall method with image generation + result = await self.client.acall( + api_kwargs=self.image_generation_kwargs, + model_type=ModelType.IMAGE_GENERATION, + ) + + # Assertions + MockAsyncOpenAI.assert_called_once() + mock_async_client.images.generate.assert_awaited_once_with( + **self.image_generation_kwargs + ) + self.assertEqual(result, self.mock_image_response) + + # Test parse_image_generation_response + output = self.client.parse_image_generation_response(result) + self.assertTrue(isinstance(output, GeneratorOutput)) + self.assertEqual(output.data, "https://example.com/generated_image.jpg") + + @patch( + "adalflow.components.model_client.openai_client.OpenAIClient.init_sync_client" + ) + @patch("adalflow.components.model_client.openai_client.OpenAI") + def test_call_image_generation(self, MockSyncOpenAI, mock_init_sync_client): + mock_sync_client = Mock() + MockSyncOpenAI.return_value = mock_sync_client + mock_init_sync_client.return_value = mock_sync_client + + # Mock the image generation response + mock_sync_client.images.generate = Mock( + return_value=type("Response", (), {"data": self.mock_image_response})() + ) + + # Set the sync client + self.client.sync_client = mock_sync_client + + # Call the call method with image generation + result = self.client.call( + api_kwargs=self.image_generation_kwargs, + model_type=ModelType.IMAGE_GENERATION, + ) + + # Assertions + mock_sync_client.images.generate.assert_called_once_with( + **self.image_generation_kwargs + ) + self.assertEqual(result, self.mock_image_response) + + # Test parse_image_generation_response + output = 
self.client.parse_image_generation_response(result) + self.assertTrue(isinstance(output, GeneratorOutput)) + self.assertEqual(output.data, "https://example.com/generated_image.jpg") + if __name__ == "__main__": unittest.main() diff --git a/adalflow/tests/test_parameter_text_grad.py b/adalflow/tests/test_parameter_text_grad.py index f3ea2c1ca..91cf4dc92 100644 --- a/adalflow/tests/test_parameter_text_grad.py +++ b/adalflow/tests/test_parameter_text_grad.py @@ -29,7 +29,9 @@ def setUp(self): ) def test_get_gradient_text(self): - expected_output = """1. + expected_output = """Batch size: 1 + +1. Conversation context Gradient 2""" @@ -79,7 +81,7 @@ def test_update_prompt(self): result = tgd.llm_optimizer.get_prompt(**user_prompt_kwargs) # Check if each variable value is in the generated output - self.assertIn("Role description", result) + # self.assertIn("Role description", result) # self.assertIn("short value", result) self.assertIn("gradient and context text", result) # self.assertIn("", result) diff --git a/benchmarks/hotpot_qa/adal_train.py b/benchmarks/hotpot_qa/_adal_train.py similarity index 99% rename from benchmarks/hotpot_qa/adal_train.py rename to benchmarks/hotpot_qa/_adal_train.py index 4162bc98d..e397cf0f6 100644 --- a/benchmarks/hotpot_qa/adal_train.py +++ b/benchmarks/hotpot_qa/_adal_train.py @@ -1,3 +1,4 @@ +"deprecated" """We will use dspy's retriever to keep that the same and only use our generator and optimizer""" import dspy @@ -22,9 +23,9 @@ def load_datasets(): - trainset = HotPotQA(split="train", size=20) - valset = HotPotQA(split="val", size=50) - testset = HotPotQA(split="test", size=50) # to keep the same as the dspy + trainset = HotPotQA(split="train", size=20) # 20 + valset = HotPotQA(split="val", size=50) # 50 + testset = HotPotQA(split="test", size=50) # to keep the same as the dspy #50 print(f"trainset, valset: {len(trainset)}, {len(valset)}, example: {trainset[0]}") return trainset, valset, testset diff --git a/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py b/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py new file mode 100644 index 000000000..cebcfdf28 --- /dev/null +++ b/benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py @@ -0,0 +1,534 @@ +"""We will use dspy's retriever to keep that the same and only use our generator and optimizer""" + +import dspy +from typing import List +from dataclasses import dataclass, field + +import adalflow as adal +from adalflow.optim.parameter import Parameter, ParameterType + + +from adalflow.core.retriever import Retriever + +from benchmarks.hotpot_qa.adal_exp.build_vanilla_rag import DspyRetriever +from adalflow.utils.logger import printc + +colbertv2_wiki17_abstracts = dspy.ColBERTv2( + url="http://20.102.90.50:2017/wiki17_abstracts" +) + +dspy.settings.configure(rm=colbertv2_wiki17_abstracts) + + +# task pipeline + + +# dspy format +# Follow the following format. +# Context: may contain relevant facts +# Question: ${question} +# Reasoning: Let's think step by step in order to ${produce the query}. We ... 
+# Query: ${query} +@dataclass +class QueryRewritterData(adal.DataClass): + reasoning: str = field( + metadata={"desc": "The reasoning to produce the query"}, + ) + query: str = field( + metadata={"desc": "The query you produced"}, + ) + + __output_fields__ = ["reasoning", "query"] + + +query_template = """ +{{task_desc_str}} + +{{output_format_str}} +{# Few shot demos #} +{% if few_shot_demos is not none %} +Here are some examples: +{{few_shot_demos}} +{% endif %} + + +Context: {{context}} +Question: {{question}} + +""" + + +class DeduplicateList(adal.GradComponent): + def __init__(self): + super().__init__() + + def call(self, exisiting_list: List[str], new_list: List[str]) -> List[str]: + + seen = set() + return [x for x in exisiting_list + new_list if not (x in seen or seen.add(x))] + + def backward(self, *args, **kwargs): + + printc(f"DeduplicateList backward: {args}", "yellow") + return super().backward(*args, **kwargs) + + +# User customize an auto-grad operator +# Need this to be a GradComponent + + +# NOTE: deprecated +class MultiHopRetriever(adal.Retriever): + def __init__(self, model_client, model_kwargs, passages_per_hop=3, max_hops=2): + super().__init__() + + self.passages_per_hop = passages_per_hop + self.max_hops = max_hops + + self.data_parser = adal.DataClassParser( + data_class=QueryRewritterData, return_data_class=True, format_type="json" + ) + + # Grad Component + self.query_generators: List[adal.Generator] = [] + for i in range(self.max_hops): + self.query_generators.append( + adal.Generator( + name=f"query_generator_{i}", + model_client=model_client, + model_kwargs=model_kwargs, + prompt_kwargs={ + "few_shot_demos": Parameter( + name="few_shot_demos_1", + data=None, + role_desc="To provide few shot demos to the language model", + requires_opt=True, + param_type=ParameterType.DEMOS, + ), + "task_desc_str": Parameter( + name="task_desc_str", + data="""Write a simple search query that will help answer a complex question. + +You will receive a context(may contain relevant facts) and a question. 
+Think step by step.""", + role_desc="Task description for the language model", + requires_opt=True, + param_type=ParameterType.PROMPT, + ), + "output_format_str": self.data_parser.get_output_format_str(), + }, + template=query_template, + output_processors=self.data_parser, + use_cache=True, + ) + ) + self.retriever = DspyRetriever(top_k=passages_per_hop) + self.deduplicater = DeduplicateList() + + @staticmethod + def context_to_str(context: List[str]) -> str: + return "\n".join(context) + + @staticmethod + def deduplicate(seq: list[str]) -> list[str]: + """ + Source: https://stackoverflow.com/a/480227/1493011 + """ + + seen = set() + return [x for x in seq if not (x in seen or seen.add(x))] + + def call(self, *, question: str, id: str = None) -> adal.RetrieverOutput: + context = [] + print(f"question: {question}") + for i in range(self.max_hops): + gen_out = self.query_generators[i]( + prompt_kwargs={ + "context": self.context_to_str(context), + "question": question, + }, + id=id, + ) + + query = gen_out.data.query if gen_out.data and gen_out.data.query else None + + print(f"query {i}: {query}") + + retrieve_out = self.retriever.call(input=query) + passages = retrieve_out[0].documents + context = self.deduplicate(context + passages) + out = [adal.RetrieverOutput(documents=context, query=query, doc_indices=[])] + return out + + def forward(self, *, question: str, id: str = None) -> adal.Parameter: + # assemble the foundamental building blocks + context = [] + print(f"question: {question}") + # 1. make question a parameter as generator does not have it yet + # can create the parameter at the leaf, but not the intermediate nodes + question_param = adal.Parameter( + name="question", + data=question, + role_desc="The question to be answered", + requires_opt=True, + param_type=ParameterType.INPUT, + ) + context_param = adal.Parameter( + name="context", + data=context, + role_desc="The context to be used for the query", + requires_opt=True, + param_type=ParameterType.INPUT, + ) + context_param.add_successor_map_fn( + successor=self.query_generators[0], + map_fn=lambda x: self.context_to_str(x.data), + ) + + for i in range(self.max_hops): + + gen_out = self.query_generators[i].forward( + prompt_kwargs={ + "context": context_param, + "question": question_param, + }, + id=id, + ) + + success_map_fn = lambda x: ( # noqa E731 + x.full_response.data.query + if x.full_response + and x.full_response.data + and x.full_response.data.query + else None + ) + print(f"query {i}: {success_map_fn(gen_out)}") + + gen_out.add_successor_map_fn( + successor=self.retriever, map_fn=success_map_fn + ) + + retrieve_out = self.retriever.forward(input=gen_out) + + def retrieve_out_map_fn(x: adal.Parameter): + return x.data[0].documents if x.data and x.data[0].documents else [] + + print(f"retrieve_out: {retrieve_out}") + + retrieve_out.add_successor_map_fn( + successor=self.deduplicater, map_fn=retrieve_out_map_fn + ) + + context_param = self.deduplicater.forward( + exisiting_list=context_param, new_list=retrieve_out + ) + + context_param.param_type = ParameterType.RETRIEVER_OUTPUT + + return context_param + + +class MultiHopRetriever2(adal.Retriever): + def __init__(self, model_client, model_kwargs, passages_per_hop=3, max_hops=2): + super().__init__() + + self.passages_per_hop = passages_per_hop + self.max_hops = max_hops + + self.data_parser = adal.DataClassParser( + data_class=QueryRewritterData, return_data_class=True, format_type="json" + ) + + # Grad Component + # self.query_generators: List[adal.Generator] = [] 
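+        # NOTE: adal.ComponentList (added in 0.2.7) registers each generator as a
+        # named subcomponent, so named_components() and the optimizer can discover
+        # their trainable prompt parameters; a plain Python list would hide them.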
+ self.query_generators: adal.ComponentList[adal.Generator] = adal.ComponentList() + self.retrievers: List[Retriever] = [] + self.deduplicaters: List[adal.GradComponent] = [] + for i in range(self.max_hops): + self.query_generators.append( + adal.Generator( + name=f"query_generator_{i}", + model_client=model_client, + model_kwargs=model_kwargs, + prompt_kwargs={ + "few_shot_demos": Parameter( + name=f"few_shot_demos_{i}", + data=None, + role_desc="To provide few shot demos to the language model", + requires_opt=True, + param_type=ParameterType.DEMOS, + ), + "task_desc_str": Parameter( + name="task_desc_str", + data="""Write a simple search query that will help answer a complex question. + +You will receive a context(may contain relevant facts) and a question. +Think step by step.""", + role_desc="Task description for the language model", + requires_opt=True, + param_type=ParameterType.PROMPT, + ), + "output_format_str": self.data_parser.get_output_format_str(), + }, + template=query_template, + output_processors=self.data_parser, + use_cache=True, + ) + ) + self.retrievers.append(DspyRetriever(top_k=passages_per_hop)) + self.deduplicaters.append(DeduplicateList()) + + @staticmethod + def context_to_str(context: List[str]) -> str: + return "\n".join(context) + + @staticmethod + def deduplicate(seq: list[str]) -> list[str]: + """ + Source: https://stackoverflow.com/a/480227/1493011 + """ + + seen = set() + return [x for x in seq if not (x in seen or seen.add(x))] + + # def call(self, *, question: str, id: str = None) -> adal.RetrieverOutput: + # context = [] + # print(f"question: {question}") + # for i in range(self.max_hops): + # gen_out = self.query_generators[i]( + # prompt_kwargs={ + # "context": self.context_to_str(context), + # "question": question, + # }, + # id=id, + # ) + + # query = gen_out.data.query if gen_out.data and gen_out.data.query else None + + # print(f"query {i}: {query}") + + # retrieve_out = self.retrievers[i].call(input=query) + # passages = retrieve_out[0].documents + # context = self.deduplicate(context + passages) + # out = [adal.RetrieverOutput(documents=context, query=query, doc_indices=[])] + # return out + + # TODO: simplify and avoid the need where users need to write two methods (call and forward) + def call(self, *, input: str, id: str = None) -> List[adal.RetrieverOutput]: + # assemble the foundamental building blocks + printc(f"question: {input}", "yellow") + out = self.forward(input=input, id=id) + + if not isinstance(out, adal.Parameter): + raise ValueError("The output should be a parameter") + + return out.data # or full response its up to users + + def forward(self, *, input: str, id: str = None) -> adal.Parameter: + # assemble the foundamental building blocks + printc(f"question: {input}", "yellow") + context = [] + + queries: List[str] = [] + + for i in range(self.max_hops): + + gen_out = self.query_generators[i].forward( + prompt_kwargs={ + "context": context, # can be a list or a parameter + "question": adal.Parameter( + name="question", + data=input, + role_desc="The question to be answered", + requires_opt=False, + param_type=ParameterType.INPUT, + ), + }, + id=id, + ) + + success_map_fn = lambda x: ( # noqa E731 + x.full_response.data.query + if x.full_response + and x.full_response.data + and x.full_response.data.query + else ( + x.full_response.raw_response + if x.full_response and x.full_response.raw_response + else None + ) + ) + print(f"query {i}: {success_map_fn(gen_out)}") + + queries.append(success_map_fn(gen_out)) + + 
gen_out.add_successor_map_fn( + successor=self.retrievers[i], map_fn=success_map_fn + ) + + if success_map_fn(gen_out) is None: + raise ValueError(f"The query is None, please check the generator {i}") + + retrieve_out = self.retrievers[i].forward(input=gen_out, id=id) + + def retrieve_out_map_fn(x: adal.Parameter): + return x.data[0].documents if x.data and x.data[0].documents else [] + + # print(f"retrieve_out: {retrieve_out}") + + retrieve_out.add_successor_map_fn( + successor=self.deduplicaters[i], map_fn=retrieve_out_map_fn + ) + + context = self.deduplicaters[i].forward( + exisiting_list=context, new_list=retrieve_out + ) + + context.param_type = ParameterType.RETRIEVER_OUTPUT + + def context_to_retrover_output(x): + return [ + adal.RetrieverOutput( + documents=x.data, query=[input] + queries, doc_indices=[] + ) + ] + + context.data = context_to_retrover_output(context) + + printc(f"MultiHopRetriever2 grad fn: {context.grad_fn}", "yellow") + + return context + + def backward(self, *args, **kwargs): + + printc(f"MultiHopRetriever2 backward: {args}", "yellow") + super().backward(*args, **kwargs) + return + + +from benchmarks.hotpot_qa.adal_exp.build_vanilla_rag import VanillaRAG + + +class MultiHopRAG(VanillaRAG): + def __init__( + self, passages_per_hop=3, max_hops=2, model_client=None, model_kwargs=None + ): + super().__init__( + passages_per_hop=passages_per_hop, + model_client=model_client, + model_kwargs=model_kwargs, + ) + self.retriever = MultiHopRetriever2( + model_client=model_client, + model_kwargs=model_kwargs, + passages_per_hop=passages_per_hop, + max_hops=max_hops, + ) + + +def test_multi_hop_retriever(): + + from use_cases.config import ( + gpt_3_model, + ) + + multi_hop_retriever = MultiHopRetriever( + **gpt_3_model, + passages_per_hop=3, + max_hops=2, + ) + + question = "How many storeys are in the castle that David Gregory inherited?" + + # eval mode + output = multi_hop_retriever.call(question=question, id="1") + print(output) + + # train mode + multi_hop_retriever.train() + output = multi_hop_retriever.forward(question=question, id="1") + print(output) + output.draw_graph() + + +def test_multi_hop_retriever2(): + + from use_cases.config import ( + gpt_3_model, + ) + + multi_hop_retriever = MultiHopRetriever2( + **gpt_3_model, + passages_per_hop=3, + max_hops=2, + ) + + question = "How many storeys are in the castle that David Gregory inherited?" + + # eval mode + # output = multi_hop_retriever.call(question=question, id="1") + # print(output) + + # train mode + multi_hop_retriever.train() + output = multi_hop_retriever.forward(input=question, id="1") + # print(output) + output.draw_graph(full_trace=True) + + # multi_hop_retriever.eval() + # output = multi_hop_retriever.call(input=question, id="1") + # print(output) + + +def test_multi_hop_rag(): + + from use_cases.config import ( + gpt_3_model, + ) + + adal.get_logger(level="DEBUG") + + task = MultiHopRAG( + **gpt_3_model, + passages_per_hop=3, + max_hops=2, + ) + print(f"task: {task}") + + for name, comp in task.named_components(): + + if isinstance(comp, adal.Generator): + print(f"name: {name}") + print(f"comp: {comp }") + return + + # test the retriever + + question = "How many storeys are in the castle that David Gregory inherited?" 
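+
+    # train() puts the whole pipeline in training mode, so forward() returns
+    # adal.Parameter nodes whose computation graph can be drawn below.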
+ + task.train() + + # id = "1" + + # retriever_out = task.retriever(input=question, id=id) + + # print(f"retriever_out: {retriever_out}") + + # test the forward function + generator_out = task.forward(question=question, id="1") + print(f"generator_out: {generator_out}") + + generator_out.draw_graph() + + # task.eval() + # generator_out = task.call(question=question, id="1") + # print(f"generator_out: {generator_out}") + + +if __name__ == "__main__": + ### Try the minimum effort to test on any task + + # get_logger(level="DEBUG") + # test_multi_hop_retriever() + # test_multi_hop_retriever2() + test_multi_hop_rag() diff --git a/benchmarks/hotpot_qa/adal_exp/build_vanilla_rag.py b/benchmarks/hotpot_qa/adal_exp/build_vanilla_rag.py index 7e66ca9bb..3eae05989 100644 --- a/benchmarks/hotpot_qa/adal_exp/build_vanilla_rag.py +++ b/benchmarks/hotpot_qa/adal_exp/build_vanilla_rag.py @@ -108,10 +108,15 @@ def __init__(self, top_k: int = 3): self.top_k = top_k self.dspy_retriever = dspy.Retrieve(k=top_k) - def call(self, input: str, top_k: Optional[int] = None) -> List[RetrieverOutput]: + def call( + self, input: str, top_k: Optional[int] = None, id: str = None + ) -> List[RetrieverOutput]: k = top_k or self.top_k + if not input: + raise ValueError(f"Input cannot be empty, top_k: {k}") + output = self.dspy_retriever(query_or_queries=input, k=k) # print(f"dsy_retriever output: {output}") final_output: List[RetrieverOutput] = [] @@ -152,7 +157,7 @@ def __init__(self, passages_per_hop=3, model_client=None, model_kwargs=None): data=task_desc_str, role_desc="Task description for the language model", param_type=adal.ParameterType.PROMPT, - requires_opt=False, + requires_opt=True, ), "few_shot_demos": adal.Parameter( data=None, @@ -180,7 +185,7 @@ def call(self, question: str, id: str = None) -> adal.GeneratorOutput: "This component is not supposed to be called in training mode" ) - retriever_out = self.retriever.call(input=question) + retriever_out = self.retriever.call(input=question, id=id) successor_map_fn = lambda x: ( # noqa E731 "\n\n".join(x[0].documents) if x and x[0] and x[0].documents else "" @@ -201,11 +206,21 @@ def call(self, question: str, id: str = None) -> adal.GeneratorOutput: # print(f"retriever_out: {retriever_out}") return output + # def call(self, *, question: str, id: str = None) -> adal.GeneratorOutput: + # self.train() + # out = self.forward(question=question, id=id) + # if not isinstance(out, adal.Parameter): + # raise ValueError( + # "This output should be a Parameter, please check the forward function" + # ) + # self.eval() + # return out.data + # TODO: add id in the retriever output def forward(self, question: str, id: str = None) -> adal.Parameter: if not self.training: raise ValueError("This component is not supposed to be called in eval mode") - retriever_out = self.retriever.forward(input=question) + retriever_out = self.retriever.forward(input=question, id=id) successor_map_fn = lambda x: ( # noqa E731 "\n\n".join(x.data[0].documents) if x.data and x.data[0] and x.data[0].documents @@ -281,9 +296,9 @@ def test_vailla_rag(): generator_out.draw_graph() - task.eval() - generator_out = task.call(question=question, id="1") - print(f"generator_out: {generator_out}") + # task.eval() + # generator_out = task.call(question=question, id="1") + # print(f"generator_out: {generator_out}") if __name__ == "__main__": diff --git a/benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py b/benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py new file mode 100644 index 000000000..d80e6336a --- 
/dev/null
+++ b/benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py
@@ -0,0 +1,183 @@
+from typing import Any, Callable, Dict, Tuple
+
+import adalflow as adal
+from adalflow.eval.answer_match_acc import AnswerMatchAcc
+from adalflow.datasets.types import HotPotQAData
+
+from benchmarks.hotpot_qa._adal_train import load_datasets
+from benchmarks.hotpot_qa.adal_exp.build_multi_hop_rag import MultiHopRAG
+from use_cases.config import gpt_3_model, gpt_4o_model
+
+
+# TODO: look more into the loss function
+# TODO: test LLM judge too.
+class MultiHopRAGAdal(adal.AdalComponent):
+    def __init__(
+        self,
+        model_client: adal.ModelClient,
+        model_kwargs: Dict,
+        backward_engine_model_config: Dict | None = None,
+        teacher_model_config: Dict | None = None,
+        text_optimizer_model_config: Dict | None = None,
+    ):
+        task = MultiHopRAG(
+            model_client=model_client,
+            model_kwargs=model_kwargs,
+            passages_per_hop=3,
+            max_hops=2,
+        )
+        eval_fn = AnswerMatchAcc(type="fuzzy_match").compute_single_item
+        loss_fn = adal.EvalFnToTextLoss(
+            eval_fn=eval_fn, eval_fn_desc="fuzzy_match: 1 if str(y) in str(y_gt) else 0"
+        )
+        super().__init__(
+            task=task,
+            eval_fn=eval_fn,
+            loss_fn=loss_fn,
+            backward_engine_model_config=backward_engine_model_config,
+            teacher_model_config=teacher_model_config,
+            text_optimizer_model_config=text_optimizer_model_config,
+        )
+
+    # tell the trainer how to call the task
+    def prepare_task(self, sample: HotPotQAData) -> Tuple[Callable[..., Any], Dict]:
+        if self.task.training:
+            return self.task.forward, {"question": sample.question, "id": sample.id}
+        else:
+            return self.task.call, {"question": sample.question, "id": sample.id}
+
+    # TODO: use two map fns to make the code even simpler
+
+    # eval mode: get the generator output, directly engage with the eval_fn
+    def prepare_eval(self, sample: HotPotQAData, y_pred: adal.GeneratorOutput) -> float:
+        y_label = ""
+        if y_pred and y_pred.data and y_pred.data.answer:
+            y_label = y_pred.data.answer
+        return self.eval_fn, {"y": y_label, "y_gt": sample.answer}

+    # train mode: get the loss and get the data from the full_response
+    def prepare_loss(self, sample: HotPotQAData, pred: adal.Parameter):
+        # prepare gt parameter
+        y_gt = adal.Parameter(
+            name="y_gt",
+            data=sample.answer,
+            eval_input=sample.answer,
+            requires_opt=False,
+        )
+
+        # pred's full_response is the output of the task pipeline which is GeneratorOutput
+        pred.eval_input = (
+            pred.full_response.data.answer
+            if pred.full_response
+            and pred.full_response.data
+            and pred.full_response.data.answer
+            else ""
+        )
+        return self.loss_fn, {"kwargs": {"y": pred, "y_gt": y_gt}}
+
+
+# Note: diagnose is quite helpful; it lets you quickly check whether the eval function is the right metric.
+# I checked the fuzzy-match eval and found that some "yes" and "Yes" answers were not matched; after converting
+# both strings to lowercase, performance went up from 0.15 to 0.4.
+def train_diagnose(
+    model_client: adal.ModelClient,
+    model_kwargs: Dict,
+) -> Dict:
+
+    trainset, valset, testset = load_datasets()
+
+    adal_component = MultiHopRAGAdal(
+        model_client,
+        model_kwargs,
+        backward_engine_model_config=gpt_4o_model,
+        teacher_model_config=gpt_3_model,
+        text_optimizer_model_config=gpt_3_model,
+    )
+    trainer = adal.Trainer(adaltask=adal_component)
+    trainer.diagnose(dataset=trainset, split="train")
+    # trainer.diagnose(dataset=valset, split="val")
+    # trainer.diagnose(dataset=testset, split="test")
+
+
+def train(
+    train_batch_size=4,  # larger batch size is not that effective, probably because of LLMs' lost-in-the-middle behavior
+    raw_shots: int = 0,
+    bootstrap_shots: int = 4,
+    max_steps=1,
+    num_workers=4,
+    strategy="constrained",
+    optimization_order="sequential",
+    debug=False,
+    resume_from_ckpt=None,
+    exclude_input_fields_from_bootstrap_demos=True,
+):
+    adal_component = MultiHopRAGAdal(
+        **gpt_3_model,
+        teacher_model_config=gpt_3_model,
+        text_optimizer_model_config=gpt_4o_model,  # gpt-3.5 is not strong enough as an optimizer; it struggles with long context
+        backward_engine_model_config=gpt_4o_model,
+    )
+    print(adal_component)
+    trainer = adal.Trainer(
+        train_batch_size=train_batch_size,
+        adaltask=adal_component,
+        strategy=strategy,
+        max_steps=max_steps,
+        num_workers=num_workers,
+        raw_shots=raw_shots,
+        bootstrap_shots=bootstrap_shots,
+        debug=debug,
+        weighted_sampling=True,
+        optimization_order=optimization_order,
+        exclude_input_fields_from_bootstrap_demos=exclude_input_fields_from_bootstrap_demos,
+        sequential_order=["text", "demo"],
+    )
+    print(trainer)
+
+    train_dataset, val_dataset, test_dataset = load_datasets()
+    trainer.fit(
+        train_dataset=train_dataset,
+        val_dataset=val_dataset,
+        test_dataset=test_dataset,
+        resume_from_ckpt=resume_from_ckpt,
+    )
+
+
+if __name__ == "__main__":
+    from use_cases.config import gpt_3_model
+
+    log = adal.get_logger(level="DEBUG", enable_console=False)
+
+    adal.setup_env()
+
+    # task = MultiHopRAGAdal(**gpt_3_model)
+    # print(task)
+
+    # train_diagnose(**gpt_3_model)
+
+    # train: 0.15 before the evaluator converted answers to lowercase, and 0.4 after the conversion
+    train(
+        debug=False,
+        max_steps=12,
+        # resume_from_ckpt="/Users/liyin/.adalflow/ckpt/ValinaRAGAdal/random_max_steps_12_7c091_run_1.json",
+    )
+
+    # notes for debug: if you get a NoneType error, delete all model caches and try again
+    # e.g. raise ValueError: score must be provided for each demo
+
+    # 12/11/2024
+    # demo only: /Users/liyin/Documents/test/LightRAG/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_8cdfc_run_9.json
+
+    # why did text grad not improve in the RAG case? Do we need to improve the meta prompt?
+    # /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_2686e_run_1.json
+    # 0.58 -> 0.68 on the test split
+    # 0.72 text grad /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_c1660_run_1.json
+    # try cycle next
+    # 0.66 /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_1d189_run_1.json
+    # no gradients 1021s (/Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_68e7e_run_1.json) -> 0.64 -> 0.68, pass 10/10+28
+    # no gradient but scores (positive & negative) /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_83871_run_1.json 0.64->0.66, test 0.64 -> 0.66
+    # no gradient but only negative score
+    # no gradient but score + teacher demonstration.
+ # feedback while seeing the gt + y + # only negative feedback /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_f5506_run_1.json 0.62 -> 0.7 + # /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/constrained_max_steps_12_b4aa5_run_1.json 0.74 pass rate 8 32 diff --git a/benchmarks/hotpot_qa/adal_exp/train_vanilla.py b/benchmarks/hotpot_qa/adal_exp/train_vanilla.py index b6cfe9e62..fc14e161e 100644 --- a/benchmarks/hotpot_qa/adal_exp/train_vanilla.py +++ b/benchmarks/hotpot_qa/adal_exp/train_vanilla.py @@ -4,7 +4,7 @@ from adalflow.eval.answer_match_acc import AnswerMatchAcc from adalflow.datasets.types import HotPotQAData -from benchmarks.hotpot_qa.adal_train import load_datasets +from benchmarks.hotpot_qa._adal_train import load_datasets from benchmarks.hotpot_qa.adal_exp.build_vanilla_rag import VanillaRAG from use_cases.config import gpt_3_model, gpt_4o_model diff --git a/docs/CHANGLOG.md b/docs/CHANGLOG.md new file mode 100644 index 000000000..585596ab5 --- /dev/null +++ b/docs/CHANGLOG.md @@ -0,0 +1,3 @@ +## [0.2.7] - 2025-01-16 + +- Added multimodal support in Generator tutorial and more explanation by Filip. diff --git a/docs/source/get_started/installation.rst b/docs/source/get_started/installation.rst index 32dae57fb..ac59d6f15 100644 --- a/docs/source/get_started/installation.rst +++ b/docs/source/get_started/installation.rst @@ -19,7 +19,7 @@ If you know you will need `openai` and `faiss-cpu`, you can do so with: .. code-block:: bash - pip install adalflow[openai, faiss-cpu] + pip install adalflow[openai,faiss-cpu] .. note:: Check the `Optional Packages` section for more information on the available packages. diff --git a/docs/source/tutorials/auto_text_grad.rst b/docs/source/tutorials/auto_text_grad.rst index ea4294f77..438da80d6 100644 --- a/docs/source/tutorials/auto_text_grad.rst +++ b/docs/source/tutorials/auto_text_grad.rst @@ -139,6 +139,13 @@ We currently have the following operators: - forward will be able to track the predecessors to form a DAG of parameters, this will always be helpful. - # a forward will +10/27 + +**Score for weighted sampling in few-shot demo** + +Backpropagation is also used in few-shot demo especially at passing the score backward to predecessors and accumulate to the demo parameter. + + Generator Adaptation ~~~~~~~~~~~~~~~~~~~~~~ diff --git a/docs/source/tutorials/embedder.rst b/docs/source/tutorials/embedder.rst index 262e64c5e..58a9c9271 100644 --- a/docs/source/tutorials/embedder.rst +++ b/docs/source/tutorials/embedder.rst @@ -1,3 +1,16 @@ +.. raw:: html + + + + .. _tutorials-embedder: Embedder @@ -124,7 +137,7 @@ If we want to decreate the embedding dimension to only 256 to save memory, we ca .. 
code-block:: python

-    from adalflow.core.types import Embedding
+    from adalflow.core.types import Embedding, EmbedderOutput
+    from adalflow.core.functional import normalize_vector
+    from typing import List
+    from adalflow.core.component import Component
@@ -139,14 +152,14 @@ If we want to decreate the embedding dimension to only 256 to save memory, we ca
         assert self.new_dim < self.old_dim, "new_dim should be less than old_dim"

     def call(self, input: List[Embedding]) -> List[Embedding]:
-        output: List[Embedding] = deepcopy(input)
-        for embedding in output:
+        output: EmbedderOutput = deepcopy(input)
+        for embedding in output.data:
             old_embedding = embedding.embedding
             new_embedding = old_embedding[: self.new_dim]
             if self.normalize:
                 new_embedding = normalize_vector(new_embedding)
             embedding.embedding = new_embedding
-        return output
+        return output.data

     def _extra_repr(self) -> str:
         repr_str = f"old_dim={self.old_dim}, new_dim={self.new_dim}, normalize={self.normalize}"
diff --git a/docs/source/tutorials/generator.rst b/docs/source/tutorials/generator.rst
index a170a8de5..2b406fda0 100644
--- a/docs/source/tutorials/generator.rst
+++ b/docs/source/tutorials/generator.rst
@@ -106,6 +106,161 @@ In particular, we created :class:`GeneratorOutput` t
 Whether to do further processing or terminate the pipeline whenever an error occurs is up to the user from here on.

+Basic Generator Tutorial
+========================
+
+The Generator class is the core component in AdalFlow for interacting with AI models. This tutorial covers the essential concepts and patterns.
+
+What is a Generator?
+--------------------
+
+A Generator is a unified interface for model interactions that:
+
+1. Takes input and formats it using a prompt template
+2. Sends the formatted input to an AI model
+3. Returns a standardized ``GeneratorOutput`` object
+
+Basic Usage
+-----------
+
+Here's the simplest way to use a Generator:
+
+.. code-block:: python
+
+    from adalflow.core import Generator
+    from adalflow.components.model_client.openai_client import OpenAIClient
+
+    # Create a generator
+    gen = Generator(
+        model_client=OpenAIClient(),
+        model_kwargs={
+            "model": "gpt-4o-mini",
+            "temperature": 0.7
+        }
+    )
+
+    # Use the generator
+    response = gen({"input_str": "What is the capital of France?"})
+    print(response.raw_response)
+
+Understanding the Output
+------------------------
+
+Every Generator call returns a ``GeneratorOutput`` object:
+
+.. code-block:: python
+
+    response = gen({"input_str": "Hello"})
+
+    # Access different parts of the response
+    print(response.raw_response)  # Raw model output
+    print(response.data)          # Processed data (if using output processors)
+    print(response.error)         # Error message if something went wrong
+    print(response.usage)         # Token usage information
+
+When to Create a Subclass
+-------------------------
+
+You should create a Generator subclass in two main cases:
+
+1. **Different Model Types**: When using non-LLM endpoints
+
+   .. code-block:: python
+
+      class ImageGenerator(Generator):
+          """For DALL-E and other image generation models"""
+          model_type = ModelType.IMAGE_GENERATION
+
+2. **Custom Processing**: When you need special input/output handling
+
+   .. code-block:: python
+
+      class CustomGenerator(Generator):
+          def _pre_call(self, prompt_kwargs, model_kwargs):
+              # Custom preprocessing
+              return super()._pre_call(prompt_kwargs, model_kwargs)
+
+When NOT to Subclass
+--------------------
+
+Don't create a subclass for:
+
+1. **Model Parameters**: Use ``model_kwargs`` instead
+
+   .. code-block:: python
+
+      # Just pass parameters directly
+      gen = Generator(
+          model_client=client,
+          model_kwargs={
+              "model": "gpt-4o-mini",
+              "temperature": 0.9
+          }
+      )
+
+2. **Output Processing**: Use output processors
+
+   .. code-block:: python
+
+      from adalflow.core.string_parser import JsonParser
+
+      gen = Generator(
+          model_client=client,
+          output_processors=JsonParser()  # Process output as JSON
+      )
+
+Common Patterns
+---------------
+
+1. **Error Handling**:
+
+   .. code-block:: python
+
+      response = gen({"input_str": "Query"})
+      if response.error:
+          print(f"Error: {response.error}")
+      else:
+          print(response.raw_response)
+
+2. **Async Usage**:
+
+   .. code-block:: python
+
+      async def generate():
+          response = await gen.acall({"input_str": "Hello"})
+          print(response.raw_response)
+
+3. **Streaming**:
+
+   .. code-block:: python
+
+      gen = Generator(
+          model_client=client,
+          model_kwargs={"stream": True}
+      )
+      for chunk in gen({"input_str": "Tell me a story"}):
+          print(chunk)
+
+Model Types
+-----------
+
+Generator supports different model types through ``ModelType``:
+
+- ``ModelType.LLM``: Text generation (default)
+- ``ModelType.IMAGE_GENERATION``: Image generation (DALL-E)
+- ``ModelType.EMBEDDER``: Text embeddings
+- ``ModelType.RERANKER``: Document reranking
+
+Best Practices
+--------------
+
+1. Always check for errors in the response
+2. Use output processors for structured outputs
+3. Set model parameters in ``model_kwargs``
+4. Use async methods for better performance in async contexts
+5. Use streaming for long responses
+
+Remember: The Generator is designed to provide a consistent interface regardless of the underlying model or task.

 Generator In Action
 ---------------------------------------
@@ -119,10 +274,10 @@ The minimum setup to initiate a generator in the code:

 .. code-block:: python

-    from adalflow.core import Generator
+    import adalflow as adal
     from adalflow.components.model_client import GroqAPIClient

-    generator = Generator(
+    generator = adal.Generator(
         model_client=GroqAPIClient(),
         model_kwargs={"model": "llama3-8b-8192"},
     )
@@ -480,6 +635,72 @@ It will require users to define ``Parameter`` and pass it to the ``prompt_kwargs

 ..  If you change the LLM, you may need to update this tokenizer to ensure accurate token counts, chunking, and prompting.

+Image Generation
+-------------------------------------------------
+
+The Generator class also supports image generation through DALL-E models. First, you need to define a Generator subclass with the correct model type:
+
+.. code-block:: python
+
+    from adalflow import Generator
+    from adalflow.core.types import ModelType
+
+    class ImageGenerator(Generator):
+        """Generator subclass for image generation."""
+        model_type = ModelType.IMAGE_GENERATION
+
+Then you can use it like this:
+
+.. 
code-block:: python + + from adalflow import OpenAIClient + + generator = ImageGenerator( + model_client=OpenAIClient(), + model_kwargs={ + "model": "dall-e-3", # or "dall-e-2" + "size": "1024x1024", # "1024x1024", "1024x1792", or "1792x1024" for DALL-E 3 + "quality": "standard", # "standard" or "hd" (DALL-E 3 only) + "n": 1 # Number of images (1 for DALL-E 3, 1-10 for DALL-E 2) + } + ) + + # Generate an image from text + response = generator( + prompt_kwargs={"input_str": "A white siamese cat in a space suit"} + ) + # response.data will contain the image URL + + # Edit an existing image + response = generator( + prompt_kwargs={"input_str": "Add a red hat"}, + model_kwargs={ + "model": "dall-e-2", + "image": "path/to/cat.png", # Original image + "mask": "path/to/mask.png" # Optional mask showing where to edit + } + ) + + # Create variations of an image + response = generator( + prompt_kwargs={"input_str": None}, # Not needed for variations + model_kwargs={ + "model": "dall-e-2", + "image": "path/to/cat.png" # Image to create variations of + } + ) + +The generator supports: + +- Image generation from text descriptions using DALL-E 3 or DALL-E 2 +- Image editing with optional masking (DALL-E 2) +- Creating variations of existing images (DALL-E 2) +- Both local file paths and base64-encoded images +- Various image sizes and quality settings +- Multiple output formats (URL or base64) + +The response will always be wrapped in a ``GeneratorOutput`` object, maintaining consistency with other AdalFlow operations. The generated image(s) will be available in the ``data`` field as either a URL or base64 string. + .. admonition:: API reference :class: highlight diff --git a/docs/source/tutorials/model_client.rst b/docs/source/tutorials/model_client.rst index 438d34d34..4f73e2ee6 100644 --- a/docs/source/tutorials/model_client.rst +++ b/docs/source/tutorials/model_client.rst @@ -1523,3 +1523,4 @@ This is the function call that triggers the execution of the custom model client - :class:`components.model_client.anthropic_client.AnthropicAPIClient` - :class:`components.model_client.google_client.GoogleGenAIClient` - :class:`components.model_client.cohere_client.CohereAPIClient` + diff --git a/docs/source/tutorials/tool_helper.rst b/docs/source/tutorials/tool_helper.rst index 4b607a26c..a9eadeb3b 100644 --- a/docs/source/tutorials/tool_helper.rst +++ b/docs/source/tutorials/tool_helper.rst @@ -510,8 +510,8 @@ We will use :class:`components.output_parsers.outputs.JsonOutputParser` to strea from adalflow.components.output_parsers import JsonOutputParser - func_parser = JsonOutputParser(data_class=Function) - instructions = func_parser.format_instructions(exclude=["thought", "args"]) + func_parser = JsonOutputParser(data_class=Function, exclude_fields=["thought", "args"]) + instructions = func_parser.format_instructions() print(instructions) The output is: @@ -542,9 +542,7 @@ Now, let's prepare our generator with the above prompt, ``Function`` data class, model_kwargs = {"model": "gpt-3.5-turbo"} prompt_kwargs = { "tools": tool_manager.yaml_definitions, - "output_format_str": func_parser.format_instructions( - exclude=["thought", "args"] - ), + "output_format_str": func_parser.format_instructions(), } generator = Generator( model_client=ModelClientType.OPENAI(), diff --git a/docs/source/tutorials/trainer.rst b/docs/source/tutorials/trainer.rst index a8b1d750f..fd70778cb 100644 --- a/docs/source/tutorials/trainer.rst +++ b/docs/source/tutorials/trainer.rst @@ -3,3 +3,62 @@ Trainer ================ Coming soon! 
+
+
+Diagnose mode
+-------------------
+A pipeline can consist of multiple generators or retrievers. Each of them needs a unique, consistent name so that its calls can be traced and scored.
+
+
+Computation graph
+-------------------
+We design two types of graphs:
+
+1. A simple node graph with a consistent name for each generator (its component_name, or an automated name from the recursive tracing; the two need to be unified eventually). Call it the thumbnail view, pending a better name.
+2. A detailed graph for debugging and building the pipeline.
+
+EvalFunction + Score(s)
+------------------------
+Currently we can assume we only support one eval_score, but eventually we need to support two scores, such as in the case of the multi-hop RAG.
+The last LLM call has one score, while the two preceding generators can potentially have two: one propagated from the final score, and one from the output of the multi-hop retriever.
+
+So, we need to assign a unique and global component id/name. [Score, component_id, component_name]
+
+Observability
+------------------------
+Building blocks include `GeneratorCallLogger`, `RetrieverCallLogger`, and `LossCallLogger`, where each traces only a single component.
+
+In `AdalComponent`'s `configure_callbacks`, we need both `_auto_generator_callbacks` and `_auto_retriever_callbacks` to be able to trace the call of each component.
+
+.. code-block:: python
+
+    for name, generator in all_generators:
+        call_logger = GeneratorCallLogger(save_dir=save_dir)
+        call_logger.reset()
+        call_logger.register_generator(name)
+        logger_call = partial(call_logger.log_call, name)
+        generator.register_callback(
+            "on_complete", partial(_on_completion_callback, logger_call=logger_call)
+        )
+        file_path = call_logger.get_log_location(name)
+        file_paths.append(file_path)
+        log.debug(f"Registered callback for {name}, file path: {file_path}")
+
+
+So when tracing, the `logger_metadata.json` will look like this:
+
+.. code-block:: json
+
+    {
+        "retriever.query_generators.0": "/Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/diagnose_train/retriever.query_generators.0_call.jsonl",
+        "retriever.query_generators.1": "/Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/diagnose_train/retriever.query_generators.1_call.jsonl",
+        "llm": "/Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/diagnose_train/llm_call.jsonl"
+    }
+
+TODO:
+- [ ] support multiple eval scores.
+- [ ] logger metadata
+
+    {
+        "llm": "/Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/diagnose_train/llm_call.jsonl"
+    }
+- [ ] retriever log: call_logger = GeneratorCallLogger(save_dir=save_dir)
diff --git a/docs/source/use_cases/multi_hop_rag_opt.rst b/docs/source/use_cases/multi_hop_rag_opt.rst
new file mode 100644
index 000000000..b5752d285
--- /dev/null
+++ b/docs/source/use_cases/multi_hop_rag_opt.rst
@@ -0,0 +1,86 @@
+Multi-hop RAG Optimization
+============================
+
+
+question: How many storeys are in the castle that David Gregory inherited?
+
+query 0: Number of storeys in the castle inherited by David Gregory
+
+Add context from retriever -> query generator
+
+
+query 1: Kinnairdy Castle storeys OR floors OR levels
+
+So eventually the multi-hop RAG uses a multi-hop retriever that combines an advanced Generator to transform the question into multiple queries (similar to the ReAct agent design). By knowing the castle name from the first query and retrieval (it is not visible in the question itself), the second hop can retrieve the right context.
+Of course, we can even let the LLM workflow decide to stop the retrieval once it has obtained enough information.
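+
+A minimal sketch of driving the multi-hop retriever described above (it assumes
+``MultiHopRetriever2`` from ``benchmarks/hotpot_qa/adal_exp/build_multi_hop_rag.py``
+and the ``gpt_3_model`` config from ``use_cases.config``, mirroring that file's test function):
+
+.. code-block:: python
+
+    from use_cases.config import gpt_3_model
+    from benchmarks.hotpot_qa.adal_exp.build_multi_hop_rag import MultiHopRetriever2
+
+    retriever = MultiHopRetriever2(**gpt_3_model, passages_per_hop=3, max_hops=2)
+
+    # Hop 1 rewrites the question into a search query; hop 2 sees the retrieved
+    # context (e.g., the castle name) and issues a sharper follow-up query.
+    retriever.train()
+    output = retriever.forward(
+        input="How many storeys are in the castle that David Gregory inherited?",
+        id="1",
+    )
+    output.draw_graph(full_trace=True)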
+
+
+When multi-hop is not enabled, the vanilla RAG fails to give the answer.
+When it is enabled, the answer is correct.
+
+Reasoning:
+
+David Gregory inherited Kinnairdy Castle, which is a tower house having five storeys and a garret, located two miles south of Aberchirder, Aberdeenshire, Scotland.
+
+Answer: Kinnairdy Castle has five storeys.
+
+Other logs:
+
+-----------------
+
+1. Fix the dspy code at `.venv/lib/python3.12/site-packages/dsp/modules/colbertv2.py`:
+
+.. code-block:: python
+
+    from tenacity import retry, stop_after_attempt, wait_exponential
+
+
+    @CacheMemory.cache
+    @retry(
+        stop=stop_after_attempt(5),
+        wait=wait_exponential(multiplier=1, min=2, max=10),
+        reraise=True,
+    )
+    def colbertv2_get_request_v2(url: str, query: str, k: int):
+        assert k <= 100, "Only k <= 100 is supported for the hosted ColBERTv2 server."
+
+        payload = {"query": query, "k": k}
+
+        try:
+            res = requests.get(url, params=payload, timeout=10)
+            res.raise_for_status()
+            response_json = res.json()
+
+            # Check for an error in the response.
+            if response_json.get("error"):
+                raise ConnectionError(f"Error from server: {response_json['message']}")
+
+            # If we get a valid 'topk' response, return immediately.
+            if "topk" in response_json:
+                topk = response_json["topk"][:k]
+                return [{**d, "long_text": d["text"]} for d in topk]
+
+        except requests.exceptions.Timeout:
+            raise TimeoutError("The request timed out. Please try again.")
+        except requests.exceptions.RequestException as e:
+            raise ConnectionError(f"Request failed: {e}")
+
+        raise KeyError("'topk' key not found in the response.")
+
+2. If ``diagnose`` fails with an error similar to:
+
+   .. code-block:: text
+
+      Error loading jsonl file /Users/liyin/.adalflow/ckpt/MultiHopRAGAdal/diagnose_train/llm_call.jsonl: line contains invalid json: unexpected content after document: line 1 column 8568 (char 8567) (line 62)
+      Traceback (most recent call last):
+        File "/Users/liyin/Documents/test/LightRAG/benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py", line 153, in <module>
+          train_diagnose(**gpt_3_model)
+        File "/Users/liyin/Documents/test/LightRAG/benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py", line 97, in train_diagnose
+          trainer.diagnose(dataset=trainset, split="train")
+        File "/Users/liyin/Documents/test/LightRAG/adalflow/adalflow/optim/trainer/trainer.py", line 228, in diagnose
+          sorted_logs = [logs_dict[id] for id in sorted_ids]
+                        ~~~~~~~~~^^^^
+      KeyError: '5a8b57f25542995d1e6f1371'
+
+You can open the `llm_call.jsonl` file, clear all of its content, and rerun the training script.
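+
+One way to clear the file programmatically (a convenience sketch; the path below is the
+example checkpoint location from the traceback and should be adjusted to your own setup):
+
+.. code-block:: python
+
+    from pathlib import Path
+
+    log_file = Path.home() / ".adalflow/ckpt/MultiHopRAGAdal/diagnose_train/llm_call.jsonl"
+    log_file.write_text("")  # truncate the corrupted log in place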
diff --git a/notebooks/adalflow_colab_template.ipynb b/notebooks/adalflow_colab_template.ipynb index 39715816e..746f12d30 100644 --- a/notebooks/adalflow_colab_template.ipynb +++ b/notebooks/adalflow_colab_template.ipynb @@ -58,6 +58,17 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "markdown", "metadata": {}, diff --git a/notebooks/qas/adalflow_object_count_auto_optimization.ipynb b/notebooks/qas/adalflow_object_count_auto_optimization.ipynb index 9308ea7f1..533ba1249 100644 --- a/notebooks/qas/adalflow_object_count_auto_optimization.ipynb +++ b/notebooks/qas/adalflow_object_count_auto_optimization.ipynb @@ -62,6 +62,17 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/tutorials/adalflow_classification_optimization.ipynb b/notebooks/tutorials/adalflow_classification_optimization.ipynb index c6bddc7e7..0d3034f71 100644 --- a/notebooks/tutorials/adalflow_classification_optimization.ipynb +++ b/notebooks/tutorials/adalflow_classification_optimization.ipynb @@ -62,7 +62,7 @@ }, { "cell_type": "code", - "execution_count": 42, + "execution_count": null, "metadata": { "id": "tAp3eDjOCma1" }, @@ -75,6 +75,63 @@ "clear_output()" ] }, + { + "cell_type": "code", + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install “anyio>=3.1.0,<4.0”\n", + "!pip install httpx==0.24.1" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "CU672Gt4bY7b", + "outputId": "532c84d2-c7bd-40ac-c050-e2c5dddc8946" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Found existing installation: httpx 0.28.1\n", + "Uninstalling httpx-0.28.1:\n", + " Successfully uninstalled httpx-0.28.1\n", + "Found existing installation: anyio 3.7.1\n", + "Uninstalling anyio-3.7.1:\n", + " Successfully uninstalled anyio-3.7.1\n", + "/bin/bash: line 1: 4.0”: No such file or directory\n", + "Collecting httpx==0.24.1\n", + " Downloading httpx-0.24.1-py3-none-any.whl.metadata (7.4 kB)\n", + "Requirement already satisfied: certifi in /usr/local/lib/python3.10/dist-packages (from httpx==0.24.1) (2024.8.30)\n", + "Collecting httpcore<0.18.0,>=0.15.0 (from httpx==0.24.1)\n", + " Downloading httpcore-0.17.3-py3-none-any.whl.metadata (18 kB)\n", + "Requirement already satisfied: idna in /usr/local/lib/python3.10/dist-packages (from httpx==0.24.1) (3.10)\n", + "Requirement already satisfied: sniffio in /usr/local/lib/python3.10/dist-packages (from httpx==0.24.1) (1.3.1)\n", + "Requirement already satisfied: h11<0.15,>=0.13 in /usr/local/lib/python3.10/dist-packages (from httpcore<0.18.0,>=0.15.0->httpx==0.24.1) (0.14.0)\n", + "Collecting anyio<5.0,>=3.0 (from httpcore<0.18.0,>=0.15.0->httpx==0.24.1)\n", + " Downloading anyio-4.7.0-py3-none-any.whl.metadata (4.7 kB)\n", + "Requirement already satisfied: exceptiongroup>=1.0.2 in /usr/local/lib/python3.10/dist-packages (from anyio<5.0,>=3.0->httpcore<0.18.0,>=0.15.0->httpx==0.24.1) (1.2.2)\n", + "Requirement already satisfied: typing_extensions>=4.5 in /usr/local/lib/python3.10/dist-packages (from 
anyio<5.0,>=3.0->httpcore<0.18.0,>=0.15.0->httpx==0.24.1) (4.12.2)\n", + "Downloading httpx-0.24.1-py3-none-any.whl (75 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.4/75.4 kB\u001b[0m \u001b[31m2.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading httpcore-0.17.3-py3-none-any.whl (74 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m74.5/74.5 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hDownloading anyio-4.7.0-py3-none-any.whl (93 kB)\n", + "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m93.1/93.1 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", + "\u001b[?25hInstalling collected packages: anyio, httpcore, httpx\n", + " Attempting uninstall: httpcore\n", + " Found existing installation: httpcore 1.0.7\n", + " Uninstalling httpcore-1.0.7:\n", + " Successfully uninstalled httpcore-1.0.7\n", + "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", + "jupyter-server 1.24.0 requires anyio<4,>=3.1.0, but you have anyio 4.7.0 which is incompatible.\u001b[0m\u001b[31m\n", + "\u001b[0mSuccessfully installed anyio-4.7.0 httpcore-0.17.3 httpx-0.24.1\n" + ] + } + ] + }, { "cell_type": "markdown", "source": [ @@ -111,9 +168,9 @@ "base_uri": "https://localhost:8080/" }, "id": "ONfzF9Puzdd_", - "outputId": "e5c3cfc5-69cb-448a-c248-a8cebda5ba71" + "outputId": "a8ca0388-be6e-4b7a-cd05-d4ec52f64e95" }, - "execution_count": 43, + "execution_count": null, "outputs": [ { "output_type": "stream", @@ -125,6 +182,15 @@ } ] }, + { + "cell_type": "markdown", + "source": [ + "Prepare data structures and prompt template" + ], + "metadata": { + "id": "4W3yEpRpepNK" + } + }, { "cell_type": "code", "source": [ @@ -135,6 +201,8 @@ "import adalflow as adal\n", "from adalflow.core.component import Component\n", "from adalflow.datasets.types import TrecData\n", + "from adalflow.datasets.trec import TrecDataset\n", + "\n", "from adalflow.eval.answer_match_acc import AnswerMatchAcc\n", "\n", "\n", @@ -188,14 +256,70 @@ " __output_fields__ = [\n", " \"rationale\",\n", " \"class_name\",\n", - " ] # it is important to have the rationale before the class_name" + " ] # it is important to have the rationale before the class_name\n", + "\n", + "\n", + "def load_datasets():\n", + " \"\"\"Load the dataset\"\"\"\n", + " train_data = TrecDataset(split=\"train\")\n", + " val_data = TrecDataset(split=\"val\")\n", + " test_data = TrecDataset(split=\"test\")\n", + " return train_data, val_data, test_data # 0.694, 0.847" ], "metadata": { "id": "ZZIEtZYHNVjo" }, - "execution_count": 49, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "# prepare models\n", + "\n", + "from adalflow.components.model_client.openai_client import OpenAIClient\n", + "\n", + "# used as the target model\n", + "gpt_3_model = {\n", + " \"model_client\": OpenAIClient(),\n", + " \"model_kwargs\": {\n", + " \"model\": \"gpt-3.5-turbo\",\n", + " \"max_tokens\": 2000,\n", + " \"temperature\": 0.0,\n", + " \"top_p\": 0.99,\n", + " \"frequency_penalty\": 0,\n", + " \"presence_penalty\": 0,\n", + " \"stop\": None,\n", + " },\n", + "}\n", + "\n", + "# used as optimizer and backward engine\n", + "gpt_4o_mini_model = {\n", + " \"model_client\": OpenAIClient(),\n", + " \"model_kwargs\": {\n", 
+ " \"model\": \"gpt-4o-mini\",\n", + " \"temperature\": 1,\n", + " \"top_p\": 0.99,\n", + " \"max_tokens\": 1000,\n", + " # \"frequency_penalty\": 1, # high for nto repeating prompt\n", + " },\n", + "}" + ], + "metadata": { + "id": "yAvzn7DZeUX-" + }, + "execution_count": null, "outputs": [] }, + { + "cell_type": "markdown", + "source": [ + "Create the task pipeline" + ], + "metadata": { + "id": "G664uy9MgDdC" + } + }, { "cell_type": "code", "source": [ @@ -270,9 +394,112 @@ "metadata": { "id": "3Q3H9XC4Ncfi" }, - "execution_count": 50, + "execution_count": null, "outputs": [] }, + { + "cell_type": "markdown", + "source": [ + "Inference the task pipeline and draw the computation graph" + ], + "metadata": { + "id": "gj08oOqqgGyr" + } + }, + { + "cell_type": "code", + "source": [ + "# load dataset to get one example\n", + "\n", + "train_dataset, val_dataset, test_dataset = load_datasets()\n", + "example = train_dataset[0]\n", + "print(example)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "qtvLN8zOgnSg", + "outputId": "9996f8c3-371d-4b5c-ec48-e8cf6d6c396b" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "TrecData(id='e73a82a7-6a3d-4947-90f5-03739e169db0', question='When reading classified ads , what does EENTY : other stand for ?', class_name='ABBR', class_index=0)\n" + ] + } + ] + }, + { + "cell_type": "code", + "source": [ + "task = TRECClassifierStructuredOutput(\n", + " model_client=gpt_3_model[\"model_client\"],\n", + " model_kwargs=gpt_3_model[\"model_kwargs\"],\n", + ")\n", + "task.train()\n", + "\n", + "output = task(question=example.question, id=example.id)\n", + "print(output)" + ], + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "cKuW3QlhgLTG", + "outputId": "7f1f9cd6-9615-4b41-ecc5-5901626d57ae" + }, + "execution_count": null, + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Parameter(name=Generator_output, requires_opt=True, param_type=generator_output (The output of the generator.), role_desc=Output from (llm) Generator, data=```\n", + "rationale: The question is asking for the meaning of the abbreviation \"EENTY\" in classified ads, which falls under the ABBR class.\n", + "class_name: ABBR\n", + "```, predecessors={Parameter(name=Output_for, requires_opt=False, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=Output format requirements, data=Your output should be formatted as a standard YAML instance with the following schema:\n", + "```\n", + "rationale: Your step-by-step reasoning to classify the question to class_name (str) (optional)\n", + "class_name: One of {ABBR, ENTY, DESC, HUM, LOC, NUM} (str) (optional)\n", + "```\n", + "-Make sure to always enclose the YAML output in triple backticks (```). 
Please do not add anything other than valid YAML output!\n", + "-Follow the YAML formatting conventions with an indent of 2 spaces.\n", + "-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\n", + "-Quote the string values properly., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), Parameter(name=Few_shot_e, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=Few shot examples to help the model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), Parameter(name=Input_to_t, requires_opt=False, param_type=none (), role_desc=input to the LLM, data=question: 'When reading classified ads , what does EENTY : other stand for ?', predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), Parameter(name=Task_descr, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=Task description, data=You are a classifier. Given a question, you need to classify it into one of the following classes:\n", + " Format: class_index. class_name, class_description\n", + " 0. ABBR, Abbreviation: Questions about abbreviations and their meanings\n", + " 1. DESC, Description: Questions seeking descriptions of people, things, or concepts\n", + " 2. ENTY, Entity: Questions about entities (e.g., animals, colors, inventions)\n", + " 3. HUM, Human: Questions about people or organizations\n", + " 4. LOC, Location: Questions about places, cities, countries\n", + " 5. NUM, Numeric: Questions seeking numeric answers (e.g., dates, amounts, distances)\n", + " - Do not try to answer the question:\n", + " , predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={})}, gradients=[], raw_response=None, input_args={'prompt_kwargs': {'system_prompt': Parameter(name=Task_descr, requires_opt=True, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=Task description, data=You are a classifier. Given a question, you need to classify it into one of the following classes:\n", + " Format: class_index. class_name, class_description\n", + " 0. ABBR, Abbreviation: Questions about abbreviations and their meanings\n", + " 1. DESC, Description: Questions seeking descriptions of people, things, or concepts\n", + " 2. ENTY, Entity: Questions about entities (e.g., animals, colors, inventions)\n", + " 3. HUM, Human: Questions about people or organizations\n", + " 4. LOC, Location: Questions about places, cities, countries\n", + " 5. NUM, Numeric: Questions seeking numeric answers (e.g., dates, amounts, distances)\n", + " - Do not try to answer the question:\n", + " , predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'output_format_str': Parameter(name=Output_for, requires_opt=False, param_type=prompt (Instruction to the language model on task, data, and format.), role_desc=Output format requirements, data=Your output should be formatted as a standard YAML instance with the following schema:\n", + "```\n", + "rationale: Your step-by-step reasoning to classify the question to class_name (str) (optional)\n", + "class_name: One of {ABBR, ENTY, DESC, HUM, LOC, NUM} (str) (optional)\n", + "```\n", + "-Make sure to always enclose the YAML output in triple backticks (```). 
Please do not add anything other than valid YAML output!\n", + "-Follow the YAML formatting conventions with an indent of 2 spaces.\n", + "-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\n", + "-Quote the string values properly., predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'few_shot_demos': Parameter(name=Few_shot_e, requires_opt=True, param_type=demos (A few examples to guide the language model.), role_desc=Few shot examples to help the model, data=None, predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={}), 'input_str': Parameter(name=Input_to_t, requires_opt=False, param_type=none (), role_desc=input to the LLM, data=question: 'When reading classified ads , what does EENTY : other stand for ?', predecessors=set(), gradients=[], raw_response=None, input_args=None, traces={})}, 'model_kwargs': {'model': 'gpt-3.5-turbo', 'max_tokens': 2000, 'temperature': 0.0, 'top_p': 0.99, 'frequency_penalty': 0, 'presence_penalty': 0, 'stop': None}}, traces={})\n" + ] + } + ] + }, { "cell_type": "code", "source": [ @@ -335,7 +562,7 @@ "metadata": { "id": "HpkQYsh2NevT" }, - "execution_count": 51, + "execution_count": null, "outputs": [] }, { @@ -357,13 +584,16 @@ "\n", " # Define the model configuration for all components\n", " gpt_4o_model = {\n", - " \"model\": \"gpt-4-turbo-preview\",\n", - " \"temperature\": 0,\n", - " \"max_tokens\": 1000,\n", - " \"top_p\": 1,\n", - " \"frequency_penalty\": 0,\n", - " \"presence_penalty\": 0,\n", + " \"model_client\": OpenAIClient(),\n", + " \"model_kwargs\": {\n", + " \"model\": \"gpt-4o-mini\",\n", + " \"temperature\": 1,\n", + " \"top_p\": 0.99,\n", + " \"max_tokens\": 1000,\n", + " # \"frequency_penalty\": 1, # high for nto repeating prompt\n", + " },\n", " }\n", + "\n", " print(f\"Component model configuration: {gpt_4o_model}\")\n", "\n", " try:\n", @@ -414,35 +644,312 @@ "metadata": { "id": "PEj6xiZ5dVaj" }, - "execution_count": 52, + "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ - "from adalflow.components.model_client.openai_client import OpenAIClient\n", - "\n", - "\n", - "gpt_4o_model = {\n", - " \"model_client\": OpenAIClient(),\n", - " \"model_kwargs\": {\n", - " \"model\": \"gpt-4o-mini\",\n", - " \"max_tokens\": 2000,\n", - " },\n", - "}\n", - "\n", - "\n", - "train(\n", - " model_client=OpenAIClient(),\n", - " model_kwargs=gpt_4o_model,\n", - ")" + "train(**gpt_3_model)" ], "metadata": { "id": "GnlZBQOMEj6E", - "collapsed": true + "colab": { + "base_uri": "https://localhost:8080/", + "height": 1000 + }, + "outputId": "055a95c4-ccae-4028-d904-86b839bc1c14" }, "execution_count": null, - "outputs": [] + "outputs": [ + { + "output_type": "stream", + "name": "stdout", + "text": [ + "Starting training process...\n", + "Component model configuration: {'model_client': OpenAIClient(), 'model_kwargs': {'model': 'gpt-4o-mini', 'temperature': 1, 'top_p': 0.99, 'max_tokens': 1000}}\n", + "Initializing ADAL component...\n", + "ADAL component initialized successfully\n", + "Initializing trainer...\n", + "Trainer initialized successfully\n", + "Loading datasets...\n", + "Datasets loaded - Train size: 120, Val size: 36, Test size: 144\n", + "Starting model training...\n", + "raw_shots: 0, bootstrap_shots: 1\n", + "Configuring teacher generator.\n", + "Configuring teacher generator for Generator(\n", + " model_kwargs={'model': 'gpt-4o-mini', 'temperature': 1, 'top_p': 0.99, 'max_tokens': 1000}, 
trainable_prompt_kwargs=[]\n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {% if output_format_str is not none %}\n", + " {{output_format_str}}\n", + " {% endif %}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': 'You are a classifier. Given a question, you need to classify it into one of the following classes:\\n Format: class_index. class_name, class_description\\n 0. ABBR, Abbreviation: Questions about abbreviations and their meanings\\n 1. DESC, Description: Questions seeking descriptions of people, things, or concepts\\n 2. ENTY, Entity: Questions about entities (e.g., animals, colors, inventions)\\n 3. HUM, Human: Questions about people or organizations\\n 4. LOC, Location: Questions about places, cities, countries\\n 5. NUM, Numeric: Questions seeking numeric answers (e.g., dates, amounts, distances)\\n - Do not try to answer the question:\\n ', 'output_format_str': 'Your output should be formatted as a standard YAML instance with the following schema:\\n```\\nrationale: Your step-by-step reasoning to classify the question to class_name (str) (optional)\\nclass_name: One of {ABBR, ENTY, DESC, HUM, LOC, NUM} (str) (optional)\\n```\\n-Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\\n-Follow the YAML formatting conventions with an indent of 2 spaces.\\n-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\\n-Quote the string values properly.', 'few_shot_demos': 'None'}, prompt_variables: ['output_format_str', 'system_prompt', 'input_str', 'few_shot_demos']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): DataClassParser(\n", + " data_class=TRECExtendedData, format_type=yaml, return_data_class=True, input_fields=['question'], output_fields=['rationale', 'class_name']\n", + " (_output_processor): YamlParser()\n", + " (output_format_prompt): Prompt(\n", + " template: Your output should be formatted as a standard YAML instance with the following schema:\n", + " ```\n", + " {{schema}}\n", + " ```\n", + " -Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\n", + " -Follow the YAML formatting conventions with an indent of 2 spaces.\n", + " -DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\n", + " -Quote the string values properly., prompt_variables: ['schema']\n", + " )\n", + " )\n", + ")\n", + "Teacher generator set: Generator(\n", + " model_kwargs={'model': 'gpt-4o-mini', 'temperature': 1, 'top_p': 0.99, 'max_tokens': 1000}, trainable_prompt_kwargs=[]\n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {% if output_format_str is not none %}\n", + " {{output_format_str}}\n", + " {% endif %}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': 'You are a classifier. Given a question, you need to classify it into one of the following classes:\\n Format: class_index. class_name, class_description\\n 0. ABBR, Abbreviation: Questions about abbreviations and their meanings\\n 1. 
DESC, Description: Questions seeking descriptions of people, things, or concepts\\n 2. ENTY, Entity: Questions about entities (e.g., animals, colors, inventions)\\n 3. HUM, Human: Questions about people or organizations\\n 4. LOC, Location: Questions about places, cities, countries\\n 5. NUM, Numeric: Questions seeking numeric answers (e.g., dates, amounts, distances)\\n - Do not try to answer the question:\\n ', 'output_format_str': 'Your output should be formatted as a standard YAML instance with the following schema:\\n```\\nrationale: Your step-by-step reasoning to classify the question to class_name (str) (optional)\\nclass_name: One of {ABBR, ENTY, DESC, HUM, LOC, NUM} (str) (optional)\\n```\\n-Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\\n-Follow the YAML formatting conventions with an indent of 2 spaces.\\n-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\\n-Quote the string values properly.', 'few_shot_demos': 'None'}, prompt_variables: ['output_format_str', 'system_prompt', 'input_str', 'few_shot_demos']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): DataClassParser(\n", + " data_class=TRECExtendedData, format_type=yaml, return_data_class=True, input_fields=['question'], output_fields=['rationale', 'class_name']\n", + " (_output_processor): YamlParser()\n", + " (output_format_prompt): Prompt(\n", + " template: Your output should be formatted as a standard YAML instance with the following schema:\n", + " ```\n", + " {{schema}}\n", + " ```\n", + " -Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\n", + " -Follow the YAML formatting conventions with an indent of 2 spaces.\n", + " -DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\n", + " -Quote the string values properly., prompt_variables: ['schema']\n", + " )\n", + " )\n", + "), teacher Generator(\n", + " model_kwargs={'model': 'gpt-4o-mini', 'temperature': 1, 'top_p': 0.99, 'max_tokens': 1000}, trainable_prompt_kwargs=[]\n", + " (prompt): Prompt(\n", + " template: \n", + " {{system_prompt}}\n", + " {% if output_format_str is not none %}\n", + " {{output_format_str}}\n", + " {% endif %}\n", + " {% if few_shot_demos is not none %}\n", + " Here are some examples:\n", + " {{few_shot_demos}}\n", + " {% endif %}\n", + " \n", + " \n", + " {{input_str}}\n", + " \n", + " , prompt_kwargs: {'system_prompt': 'You are a classifier. Given a question, you need to classify it into one of the following classes:\\n Format: class_index. class_name, class_description\\n 0. ABBR, Abbreviation: Questions about abbreviations and their meanings\\n 1. DESC, Description: Questions seeking descriptions of people, things, or concepts\\n 2. ENTY, Entity: Questions about entities (e.g., animals, colors, inventions)\\n 3. HUM, Human: Questions about people or organizations\\n 4. LOC, Location: Questions about places, cities, countries\\n 5. 
NUM, Numeric: Questions seeking numeric answers (e.g., dates, amounts, distances)\\n - Do not try to answer the question:\\n ', 'output_format_str': 'Your output should be formatted as a standard YAML instance with the following schema:\\n```\\nrationale: Your step-by-step reasoning to classify the question to class_name (str) (optional)\\nclass_name: One of {ABBR, ENTY, DESC, HUM, LOC, NUM} (str) (optional)\\n```\\n-Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\\n-Follow the YAML formatting conventions with an indent of 2 spaces.\\n-DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\\n-Quote the string values properly.', 'few_shot_demos': 'None'}, prompt_variables: ['output_format_str', 'system_prompt', 'input_str', 'few_shot_demos']\n", + " )\n", + " (model_client): OpenAIClient()\n", + " (output_processors): DataClassParser(\n", + " data_class=TRECExtendedData, format_type=yaml, return_data_class=True, input_fields=['question'], output_fields=['rationale', 'class_name']\n", + " (_output_processor): YamlParser()\n", + " (output_format_prompt): Prompt(\n", + " template: Your output should be formatted as a standard YAML instance with the following schema:\n", + " ```\n", + " {{schema}}\n", + " ```\n", + " -Make sure to always enclose the YAML output in triple backticks (```). Please do not add anything other than valid YAML output!\n", + " -Follow the YAML formatting conventions with an indent of 2 spaces.\n", + " -DO NOT mistaken the \"properties\" and \"type\" in the schema as the actual fields in the YAML output.\n", + " -Quote the string values properly., prompt_variables: ['schema']\n", + " )\n", + " )\n", + ")\n", + "Teacher generator configured.\n", + "Configured demo optimizers\n", + "Backward engine configured for all generators.\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "\n", + "Loading Data: 100%|██████████| 144/144 [00:00<00:00, 9161.62it/s]\n", + "Predicting: step(0): 0.8264 across 144 samples, Max potential: 0.8264: 100%|██████████| 144/144 [00:19<00:00, 7.39it/s]\n" + ] + }, + { + "output_type": "stream", + "name": "stdout", + "text": [ + "completed_samples: 144, len: 144\n", + "Initial validation score: 0.8263888888888888\n", + "Initial test score: None\n", + "Checkpoint path: /root/.adalflow/ckpt/TrecClassifierAdal\n", + "save to /root/.adalflow/ckpt/TrecClassifierAdal/constrained_max_steps_12_a6e76_run_1.json\n" + ] + }, + { + "output_type": "stream", + "name": "stderr", + "text": [ + "\n", + "Training Step: 1: 0%| | 0/30 [00:00\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtrain\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mgpt_3_model\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m\u001b[0m in \u001b[0;36mtrain\u001b[0;34m(model_client, model_kwargs, train_batch_size, raw_shots, bootstrap_shots, max_steps, num_workers, strategy, optimization_order, debug)\u001b[0m\n\u001b[1;32m 61\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 62\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"Starting model training...\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 63\u001b[0;31m trainer.fit(\n\u001b[0m\u001b[1;32m 64\u001b[0m 
\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtrain_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 65\u001b[0m \u001b[0mval_dataset\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtest_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/optim/trainer/trainer.py\u001b[0m in \u001b[0;36mfit\u001b[0;34m(self, adaltask, train_loader, train_dataset, val_dataset, test_dataset, debug, save_traces, raw_shots, bootstrap_shots, resume_from_ckpt)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0mstarting_step\u001b[0m \u001b[0;34m+=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax_steps\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \u001b[0;32melif\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstrategy\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"constrained\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 479\u001b[0;31m trainer_results = self._fit_text_grad_constraint(\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0mtrain_loader\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0mval_dataset\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/optim/trainer/trainer.py\u001b[0m in \u001b[0;36m_fit_text_grad_constraint\u001b[0;34m(self, train_loader, val_dataset, test_dataset, trainer_results, starting_step)\u001b[0m\n\u001b[1;32m 1779\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1780\u001b[0m all_samples, all_losses, all_y_preds = (\n\u001b[0;32m-> 1781\u001b[0;31m self._text_grad_constraint_propose_step(\n\u001b[0m\u001b[1;32m 1782\u001b[0m \u001b[0msteps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0msteps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1783\u001b[0m \u001b[0mall_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mall_samples\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/optim/trainer/trainer.py\u001b[0m in \u001b[0;36m_text_grad_constraint_propose_step\u001b[0;34m(self, steps, all_samples, all_losses, all_y_preds, include_demo_optimizers)\u001b[0m\n\u001b[1;32m 1657\u001b[0m \u001b[0;31m# print(f\"Proposing step: {i}\")\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1658\u001b[0m \u001b[0;31m# self.optimizer.propose()\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1659\u001b[0;31m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_propose_text_optimizers\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;31m# new prompts\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1660\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0minclude_demo_optimizers\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1661\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_demo_optimizers_propose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/optim/trainer/trainer.py\u001b[0m in 
\u001b[0;36m_propose_text_optimizers\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 857\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_propose_text_optimizers\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 858\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mtext_optimizer\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtext_optimizers\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 859\u001b[0;31m \u001b[0mtext_optimizer\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpropose\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 860\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 861\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_trainable_text_params\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/optim/text_grad/tgd_optimizer.py\u001b[0m in \u001b[0;36mpropose\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 323\u001b[0m }\n\u001b[1;32m 324\u001b[0m \u001b[0;31m# turn off cache\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 325\u001b[0;31m response = self.llm_optimizer.call(\n\u001b[0m\u001b[1;32m 326\u001b[0m \u001b[0mprompt_kwargs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mprompt_kwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0muse_cache\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mnot\u001b[0m \u001b[0mno_cache\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 327\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/core/generator.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, prompt_kwargs, model_kwargs, use_cache, id)\u001b[0m\n\u001b[1;32m 771\u001b[0m \u001b[0;31m# call the model client\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 772\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 773\u001b[0;31m \u001b[0mcompletion\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 774\u001b[0m \u001b[0muse_cache\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0muse_cache\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0muse_cache\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_use_cache\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 775\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/core/generator.py\u001b[0m in \u001b[0;36m_model_client_call\u001b[0;34m(self, api_kwargs, use_cache)\u001b[0m\n\u001b[1;32m 345\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 346\u001b[0m \u001b[0mcached_completion\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_check_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mindex_content\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 347\u001b[0;31m \u001b[0;32mif\u001b[0m \u001b[0mcached_completion\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m 
\u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 348\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mcached_completion\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/backoff/_sync.py\u001b[0m in \u001b[0;36mretry\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 104\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 105\u001b[0;31m \u001b[0mret\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtarget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 106\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mexception\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0mmax_tries_exceeded\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mtries\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mmax_tries_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/adalflow/components/model_client/openai_client.py\u001b[0m in \u001b[0;36mcall\u001b[0;34m(self, api_kwargs, model_type)\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchat_completion_parser\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mhandle_streaming_response\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 286\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msync_client\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompletions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mapi_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 287\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msync_client\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mchat\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompletions\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcreate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m**\u001b[0m\u001b[0mapi_kwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 288\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 289\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mf\"model_type {model_type} is not supported\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_utils/_utils.py\u001b[0m in \u001b[0;36mwrapper\u001b[0;34m(*args, **kwargs)\u001b[0m\n\u001b[1;32m 273\u001b[0m \u001b[0mmsg\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34mf\"Missing required argument: {quote(missing[0])}\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 274\u001b[0m \u001b[0;32mraise\u001b[0m 
\u001b[0mTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 275\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfunc\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 276\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 277\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mwrapper\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/resources/chat/completions.py\u001b[0m in \u001b[0;36mcreate\u001b[0;34m(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, response_format, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, extra_headers, extra_query, extra_body, timeout)\u001b[0m\n\u001b[1;32m 827\u001b[0m ) -> ChatCompletion | Stream[ChatCompletionChunk]:\n\u001b[1;32m 828\u001b[0m \u001b[0mvalidate_response_format\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse_format\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 829\u001b[0;31m return self._post(\n\u001b[0m\u001b[1;32m 830\u001b[0m \u001b[0;34m\"/chat/completions\"\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 831\u001b[0m body=maybe_transform(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36mpost\u001b[0;34m(self, path, cast_to, body, options, files, stream, stream_cls)\u001b[0m\n\u001b[1;32m 1276\u001b[0m \u001b[0mmethod\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"post\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0murl\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mjson_data\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mbody\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfiles\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mto_httpx_files\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfiles\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1277\u001b[0m )\n\u001b[0;32m-> 1278\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcast\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mResponseT\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcast_to\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mopts\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstream\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstream_cls\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstream_cls\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1279\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1280\u001b[0m def patch(\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36mrequest\u001b[0;34m(self, cast_to, options, remaining_retries, stream, stream_cls)\u001b[0m\n\u001b[1;32m 953\u001b[0m \u001b[0mretries_taken\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;36m0\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 954\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 955\u001b[0;31m return self._request(\n\u001b[0m\u001b[1;32m 956\u001b[0m \u001b[0mcast_to\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcast_to\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 957\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/openai/_base_client.py\u001b[0m in \u001b[0;36m_request\u001b[0;34m(self, cast_to, options, retries_taken, stream, stream_cls)\u001b[0m\n\u001b[1;32m 989\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 990\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 991\u001b[0;31m response = self._client.send(\n\u001b[0m\u001b[1;32m 992\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 993\u001b[0m \u001b[0mstream\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mstream\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_should_stream_response_body\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_client.py\u001b[0m in \u001b[0;36msend\u001b[0;34m(self, request, stream, auth, follow_redirects)\u001b[0m\n\u001b[1;32m 899\u001b[0m \u001b[0mauth\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_build_request_auth\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mauth\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 900\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 901\u001b[0;31m response = self._send_handling_auth(\n\u001b[0m\u001b[1;32m 902\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 903\u001b[0m \u001b[0mauth\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mauth\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_client.py\u001b[0m in \u001b[0;36m_send_handling_auth\u001b[0;34m(self, request, auth, follow_redirects, history)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 929\u001b[0;31m response = self._send_handling_redirects(\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mfollow_redirects\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mfollow_redirects\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_client.py\u001b[0m in \u001b[0;36m_send_handling_redirects\u001b[0;34m(self, request, follow_redirects, history)\u001b[0m\n\u001b[1;32m 964\u001b[0m 
\u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 965\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 966\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_send_single_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 967\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 968\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_event_hooks\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m\"response\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_client.py\u001b[0m in \u001b[0;36m_send_single_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 1000\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1001\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mrequest_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1002\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtransport\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1003\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1004\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresponse\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstream\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSyncByteStream\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpx/_transports/default.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 216\u001b[0m )\n\u001b[1;32m 217\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mmap_httpcore_exceptions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 218\u001b[0;31m \u001b[0mresp\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_pool\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mreq\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 219\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 220\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstream\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtyping\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mIterable\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection_pool.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[0;32mwith\u001b[0m 
\u001b[0mShieldCancellation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 261\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mresponse_closed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstatus\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 262\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 263\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 264\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection_pool.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mresponse\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mconnection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mConnectionNotAvailable\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;31m# The ConnectionNotAvailable exception is a special case, that\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/connection.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[0;32mraise\u001b[0m \u001b[0mConnectionNotAvailable\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 95\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 96\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_connection\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhandle_request\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 97\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_connect\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mRequest\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0mNetworkStream\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 119\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mTrace\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"response_closed\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlogger\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequest\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mtrace\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 120\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_response_closed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 121\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mexc\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 122\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 123\u001b[0m \u001b[0;31m# Sending the request...\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py\u001b[0m in \u001b[0;36mhandle_request\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 97\u001b[0m \u001b[0mreason_phrase\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0mheaders\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 99\u001b[0;31m ) = self._receive_response_headers(**kwargs)\n\u001b[0m\u001b[1;32m 100\u001b[0m trace.return_value = (\n\u001b[1;32m 101\u001b[0m \u001b[0mhttp_version\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py\u001b[0m in \u001b[0;36m_receive_response_headers\u001b[0;34m(self, request)\u001b[0m\n\u001b[1;32m 162\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 163\u001b[0m \u001b[0;32mwhile\u001b[0m \u001b[0;32mTrue\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 164\u001b[0;31m \u001b[0mevent\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_receive_event\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 165\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mevent\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mh11\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mResponse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 166\u001b[0m \u001b[0;32mbreak\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_sync/http11.py\u001b[0m in \u001b[0;36m_receive_event\u001b[0;34m(self, timeout)\u001b[0m\n\u001b[1;32m 198\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 199\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mevent\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0mh11\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNEED_DATA\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 200\u001b[0;31m data = self._network_stream.read(\n\u001b[0m\u001b[1;32m 201\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mREAD_NUM_BYTES\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 202\u001b[0m )\n", + "\u001b[0;32m/usr/local/lib/python3.10/dist-packages/httpcore/_backends/sync.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, max_bytes, timeout)\u001b[0m\n\u001b[1;32m 26\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mmap_exceptions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mexc_map\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 27\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msettimeout\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 28\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sock\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_bytes\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 29\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 30\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mwrite\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbuffer\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mbytes\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mtyping\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOptional\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mfloat\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m->\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/lib/python3.10/ssl.py\u001b[0m in \u001b[0;36mrecv\u001b[0;34m(self, buflen, flags)\u001b[0m\n\u001b[1;32m 1286\u001b[0m \u001b[0;34m\"non-zero flags not allowed in calls to recv() on %s\"\u001b[0m \u001b[0;34m%\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1287\u001b[0m self.__class__)\n\u001b[0;32m-> 1288\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuflen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1289\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1290\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbuflen\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mflags\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/lib/python3.10/ssl.py\u001b[0m in \u001b[0;36mread\u001b[0;34m(self, len, buffer)\u001b[0m\n\u001b[1;32m 1159\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sslobj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mbuffer\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1160\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1161\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_sslobj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlen\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1162\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0mSSLError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1163\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m==\u001b[0m 
\u001b[0mSSL_ERROR_EOF\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msuppress_ragged_eofs\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ] }, { "cell_type": "markdown", diff --git a/notebooks/tutorials/adalflow_component.ipynb b/notebooks/tutorials/adalflow_component.ipynb index 8523a629e..66050d074 100644 --- a/notebooks/tutorials/adalflow_component.ipynb +++ b/notebooks/tutorials/adalflow_component.ipynb @@ -59,6 +59,17 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "code", "execution_count": 5, diff --git a/notebooks/tutorials/adalflow_dataclasses.ipynb b/notebooks/tutorials/adalflow_dataclasses.ipynb index 7ae08f63e..db35d95e2 100644 --- a/notebooks/tutorials/adalflow_dataclasses.ipynb +++ b/notebooks/tutorials/adalflow_dataclasses.ipynb @@ -82,6 +82,17 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "markdown", "metadata": { diff --git a/notebooks/tutorials/adalflow_embedder.ipynb b/notebooks/tutorials/adalflow_embedder.ipynb new file mode 100644 index 000000000..bdd20ba37 --- /dev/null +++ b/notebooks/tutorials/adalflow_embedder.ipynb @@ -0,0 +1,340 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# 🤗 Welcome to AdalFlow!\n", + "## The library to build & auto-optimize any LLM task pipelines\n", + "\n", + "Thanks for trying us out, we're here to provide you with the best LLM application development experience you can dream of 😊 any questions or concerns you may have, [come talk to us on discord,](https://discord.gg/ezzszrRZvT) we're always here to help! ⭐ Star us on Github ⭐\n", + "\n", + "\n", + "# Quick Links\n", + "\n", + "Github repo: https://github.com/SylphAI-Inc/AdalFlow\n", + "\n", + "Full Tutorials: https://adalflow.sylph.ai/index.html#.\n", + "\n", + "Deep dive on each API: check out the [developer notes](https://adalflow.sylph.ai/tutorials/index.html).\n", + "\n", + "Common use cases along with the auto-optimization: check out [Use cases](https://adalflow.sylph.ai/use_cases/index.html).\n", + "\n", + "# Author\n", + "\n", + "This notebook was created by community contributor [Name](Replace_to_github_or_other_social_account).\n", + "\n", + "# Outline\n", + "\n", + "This is a quick introduction of what AdalFlow is capable of. We will cover:\n", + "\n", + "* Simple Chatbot with structured output\n", + "* RAG task pipeline + Data processing pipeline\n", + "* Agent\n", + "\n", + "**Next: Try our [auto-optimization](https://colab.research.google.com/drive/1n3mHUWekTEYHiBdYBTw43TKlPN41A9za?usp=sharing)**\n", + "\n", + "\n", + "# Installation\n", + "\n", + "1. Use `pip` to install the `adalflow` Python package. We will need `openai`, `groq`, and `faiss`(cpu version) from the extra packages.\n", + "\n", + " ```bash\n", + " pip install adalflow[openai,groq,faiss-cpu]\n", + " ```\n", + "2. 
Set up `openai` and `groq` API keys in the environment variables" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from IPython.display import clear_output\n", + "\n", + "!pip install -U adalflow[openai,groq,faiss-cpu]\n", + "\n", + "clear_output()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set Environment Variables\n", + "\n", + "Run the following code and pass in your API keys.\n", + "\n", + "Note: for normal `.py` projects, follow our [official installation guide](https://lightrag.sylph.ai/get_started/installation.html).\n", + "\n", + "*Go to [OpenAI](https://platform.openai.com/docs/introduction) and [Groq](https://console.groq.com/docs/) to get API keys if you don't already have them.*" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", + "\n", + "\n", + "# Set environment variables\n", + "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", + "\n", + "print(\"API keys have been set.\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Embedder\n", + "\n", + "What you will learn:\n", + "\n", + "- What is Embedder and why is it designed this way?\n", + "\n", + "- When to use Embedder and how to use it?\n", + "\n", + "- How to do batch processing with BatchEmbedder?\n", + "\n", + "The core.embedder.Embedder class is similar to Generator: it is a user-facing component that orchestrates embedding models via ModelClient and output_processors. Compared with using ModelClient directly, Embedder further simplifies the interface and outputs a standard EmbedderOutput format.\n", + "\n", + "By switching the ModelClient, you can easily use different embedding models in your task pipeline, or even embed different data types such as text, images, etc.\n", + "\n", + "# EmbedderOutput\n", + "core.types.EmbedderOutput is the standard output format of Embedder. It is a subclass of DataClass and it contains the following core fields:\n", + "\n", + "data: a list of embeddings; each embedding is of type core.types.Embedding.\n", + "\n", + "error: Error message if any error occurs during the model inference stage. Failure in the output processing stage will raise an exception instead of setting this field.\n", + "\n", + "raw_response: Used for failed model inference.\n", + "\n", + "Additionally, we add three properties to the EmbedderOutput:\n", + "\n", + "length: The number of embeddings in the data.\n", + "\n", + "embedding_dim: The dimension of the embeddings in the data.\n", + "\n", + "is_normalized: Whether the embeddings are normalized to unit vectors (checked using numpy).\n", + "\n", + "# Embedder in Action\n", + "We currently support all embedding models from OpenAI and `thenlper/gte-base` from HuggingFace transformers. We will use these two to demonstrate how to use Embedder, one from the API provider and the other using a local model. For the local model, you might need to ensure `transformers` is installed."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from adalflow.core.embedder import Embedder\n", + "from adalflow.components.model_client import OpenAIClient\n", + "\n", + "model_kwargs = {\n", + "    \"model\": \"text-embedding-3-small\",\n", + "    \"dimensions\": 256,\n", + "    \"encoding_format\": \"float\",\n", + "}\n", + "\n", + "query = \"What is the capital of China?\"\n", + "\n", + "queries = [query] * 100\n", + "\n", + "\n", + "embedder = Embedder(model_client=OpenAIClient(), model_kwargs=model_kwargs)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output = embedder(query)\n", + "print(output.length, output.embedding_dim, output.is_normalized)\n", + "# 1 256 True\n", + "output = embedder(queries)\n", + "print(output.length, output.embedding_dim)\n", + "# 100 256" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Use Local Model" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from adalflow.core.embedder import Embedder\n", + "from adalflow.components.model_client import TransformersClient\n", + "\n", + "model_kwargs = {\"model\": \"thenlper/gte-base\"}\n", + "local_embedder = Embedder(model_client=TransformersClient(), model_kwargs=model_kwargs)\n", + "\n", + "output = local_embedder(query)\n", + "print(output.length, output.embedding_dim, output.is_normalized)\n", + "# 1 768 True\n", + "\n", + "output = local_embedder(queries)\n", + "print(output.length, output.embedding_dim, output.is_normalized)\n", + "# 100 768 True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Use Output Processors\n", + "If we want to decrease the embedding dimension to only 256 to save memory, we can customize an additional output processing step and pass it to the embedder via the output_processors argument.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from adalflow.core.types import Embedding, EmbedderOutput\n", + "from adalflow.core.functional import normalize_vector\n", + "from typing import List\n", + "from adalflow.core.component import Component\n", + "from copy import deepcopy\n", + "\n", + "\n", + "class DecreaseEmbeddingDim(Component):\n", + "    def __init__(self, old_dim: int, new_dim: int, normalize: bool = True):\n", + "        super().__init__()\n", + "        self.old_dim = old_dim\n", + "        self.new_dim = new_dim\n", + "        self.normalize = normalize\n", + "        assert self.new_dim < self.old_dim, \"new_dim should be less than old_dim\"\n", + "\n", + "    def call(self, input: List[Embedding]) -> List[Embedding]:\n", + "        output: EmbedderOutput = deepcopy(input)\n", + "        for embedding in output.data:\n", + "            old_embedding = embedding.embedding\n", + "            new_embedding = old_embedding[: self.new_dim]\n", + "            if self.normalize:\n", + "                new_embedding = normalize_vector(new_embedding)\n", + "            embedding.embedding = new_embedding\n", + "        return output.data\n", + "\n", + "    def _extra_repr(self) -> str:\n", + "        repr_str = f\"old_dim={self.old_dim}, new_dim={self.new_dim}, normalize={self.normalize}\"\n", + "        return repr_str" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "local_embedder_256 = Embedder(\n", + "    model_client=TransformersClient(),\n", + "    model_kwargs=model_kwargs,\n", + "    output_processors=DecreaseEmbeddingDim(768, 256),\n",
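+ "    # output_processors runs on the returned embeddings after the model call; here it\n", + "    # truncates each 768-dim gte-base vector down to 256 dims and re-normalizes it.\n",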
+ "print(local_embedder_256)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "output = local_embedder_256(query)\n", + "print(output.length, output.embedding_dim, output.is_normalized)\n", + "# 1 256 True" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Batch Embedding\n", + "\n", + "Especially in data processing pipelines, you can often have more than 1000 queries to embed. We need to chunk our queries into smaller batches to avoid memory overflow. core.embedder.BatchEmbedder is designed to handle this situation. For now, the code is rather simple, but in the future it can be extended to support multi-processing when you use AdalFlow in production data pipeline.\n", + "\n", + "The BatchEmbedder orchestrates the Embedder and handles the batching process. To use it, you need to pass the Embedder and the batch size to the constructor." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from adalflow.core.embedder import BatchEmbedder\n", + "\n", + "batch_embedder = BatchEmbedder(embedder=local_embedder, batch_size=100)\n", + "\n", + "queries = [query] * 1000\n", + "\n", + "response = batch_embedder(queries)\n", + "# 100%|██████████| 11/11 [00:04<00:00, 2.59it/s]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To integrate your own embedding model or from API providers, you need to implement your own subclass of ModelClient.\n", + "\n", + "References\n", + "\n", + "transformers: https://huggingface.co/docs/transformers/en/index\n", + "\n", + "thenlper/gte-base model: https://huggingface.co/thenlper/gte-base\n", + "\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Issues and feedback\n", + "\n", + "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n", + "\n", + "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)." 
+ ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/tutorials/adalflow_function_calls.ipynb b/notebooks/tutorials/adalflow_function_calls.ipynb index 6fba3594b..ee53cf072 100644 --- a/notebooks/tutorials/adalflow_function_calls.ipynb +++ b/notebooks/tutorials/adalflow_function_calls.ipynb @@ -1,21 +1,10 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { "cell_type": "markdown", + "metadata": { + "id": "lLGpv1fLLIjF" + }, "source": [ "# Function calls\n", "\n", @@ -30,10 +19,7 @@ "- Function call in action\n", "\n", "It follows the tutorial here: https://adalflow.sylph.ai/tutorials/tool_helper.html#" - ], - "metadata": { - "id": "lLGpv1fLLIjF" - } + ] }, { "cell_type": "code", @@ -52,20 +38,18 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", - "\n", - "# Set environment variables\n", - "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", - "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ], + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -73,21 +57,39 @@ "id": "-4c_AGBt3PlR", "outputId": "21a26437-9f95-4478-84e9-ba4369956b6f" }, - "execution_count": 2, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Please enter your OpenAI API key: ··········\n", "Please enter your GROQ API key: ··········\n", "API keys have been set.\n" ] } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", + "\n", + "# Set environment variables\n", + "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", + "\n", + "print(\"API keys have been set.\")" ] }, { "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "GMKuuP7xR9Nt" + }, + "outputs": [], "source": [ "from dataclasses import dataclass\n", "from typing import List\n", @@ -136,32 +138,20 @@ "\n", "def add_points(p1: Point, p2: Point) -> Point:\n", " return Point(p1.x + p2.x, p1.y + p2.y)" - ], - "metadata": { - "id": "GMKuuP7xR9Nt" - }, - "execution_count": 4, - "outputs": [] + ] }, { "cell_type": "markdown", - "source": [ - "## Function Tool" - ], "metadata": { "id": "jCA7HMjtT16P" - } + }, + "source": [ + "## Function Tool" + ] }, { "cell_type": "code", - "source": [ - "from adalflow.core.func_tool import FunctionTool\n", - "\n", - "functions = [multiply, add, divide, search, numpy_sum, add_points]\n", - "tools = [FunctionTool(fn=fn) for fn in functions]\n", - "for tool in tools:\n", - " print(tool)" - ], + "execution_count": 5, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -169,11 +159,10 @@ "id": 
"fgOEoLoDSBqh", "outputId": "7e636e2c-9a5d-44f1-f0fe-fe8a6bea474d" }, - "execution_count": 5, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='multiply', func_desc='multiply(a: int, b: int) -> int\\nMultiply two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']}))\n", "FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add', func_desc='add(a: int, b: int) -> int\\nAdd two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']}))\n", @@ -183,13 +172,19 @@ "FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add_points', func_desc='add_points(p1: __main__.Point, p2: __main__.Point) -> __main__.Point\\nNone', func_parameters={'type': 'object', 'properties': {'p1': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}, 'p2': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}}, 'required': ['p1', 'p2']}))\n" ] } + ], + "source": [ + "from adalflow.core.func_tool import FunctionTool\n", + "\n", + "functions = [multiply, add, divide, search, numpy_sum, add_points]\n", + "tools = [FunctionTool(fn=fn) for fn in functions]\n", + "for tool in tools:\n", + " print(tool)" ] }, { "cell_type": "code", - "source": [ - "print(tools[-2].definition.to_dict())" - ], + "execution_count": 6, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -197,50 +192,47 @@ "id": "CYJaHFhGSEzH", "outputId": "9ab36c6c-7509-4e7f-ce85-11dae889c8c2" }, - "execution_count": 6, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "{'func_name': 'numpy_sum', 'func_desc': 'numpy_sum(arr: numpy.ndarray) -> float\\nSum the elements of an array.', 'func_parameters': {'type': 'object', 'properties': {'arr': {'type': 'ndarray'}}, 'required': ['arr']}}\n" ] } + ], + "source": [ + "print(tools[-2].definition.to_dict())" ] }, { "cell_type": "code", - "source": [ - "context_map = {tool.definition.func_name: tool for tool in tools}" - ], + "execution_count": 7, "metadata": { "id": "_O4bQgXrSKb6" }, - "execution_count": 7, - "outputs": [] + "outputs": [], + "source": [ + "context_map = {tool.definition.func_name: tool for tool in tools}" + ] }, { "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "-RgWWMdISL1u" + }, + "outputs": [], "source": [ "function_name = \"add\"\n", "function_to_call = context_map[function_name]\n", "function_args = {\"a\": 1, \"b\": 2}\n", "function_response = function_to_call.call(**function_args)" - ], - "metadata": { - "id": "-RgWWMdISL1u" - }, - "execution_count": 8, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "from adalflow.core.tool_manager import ToolManager\n", - "\n", - "tool_manager = ToolManager(tools=functions)\n", - "print(tool_manager)" - ], + "execution_count": 9, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -248,34 +240,34 @@ "id": "6CT7Tez1SOai", "outputId": "e486d882-9179-4db3-f077-6adfc9fc6579" }, - "execution_count": 9, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "ToolManager(Tools: [FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='multiply', func_desc='multiply(a: 
int, b: int) -> int\\nMultiply two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add', func_desc='add(a: int, b: int) -> int\\nAdd two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: True, definition: FunctionDefinition(func_name='divide', func_desc='divide(a: float, b: float) -> float\\nDivide two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'float'}, 'b': {'type': 'float'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: True, definition: FunctionDefinition(func_name='search', func_desc='search(query: str) -> List[str]\\nSearch for query and return a list of results.', func_parameters={'type': 'object', 'properties': {'query': {'type': 'str'}}, 'required': ['query']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='numpy_sum', func_desc='numpy_sum(arr: numpy.ndarray) -> float\\nSum the elements of an array.', func_parameters={'type': 'object', 'properties': {'arr': {'type': 'ndarray'}}, 'required': ['arr']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add_points', func_desc='add_points(p1: __main__.Point, p2: __main__.Point) -> __main__.Point\\nNone', func_parameters={'type': 'object', 'properties': {'p1': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}, 'p2': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}}, 'required': ['p1', 'p2']}))], Additional Context: {})\n" ] } + ], + "source": [ + "from adalflow.core.tool_manager import ToolManager\n", + "\n", + "tool_manager = ToolManager(tools=functions)\n", + "print(tool_manager)" ] }, { "cell_type": "markdown", - "source": [ - "## ToolManager" - ], "metadata": { "id": "jzFqNnN_T-cu" - } + }, + "source": [ + "## ToolManager" + ] }, { "cell_type": "code", - "source": [ - "from adalflow.core.tool_manager import ToolManager\n", - "\n", - "tool_manager = ToolManager(tools=functions)\n", - "print(tool_manager)" - ], + "execution_count": 10, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -283,28 +275,38 @@ "id": "JX7MibWiUF3U", "outputId": "20707186-5ec3-49a4-d553-c3160c3daa84" }, - "execution_count": 10, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "ToolManager(Tools: [FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='multiply', func_desc='multiply(a: int, b: int) -> int\\nMultiply two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add', func_desc='add(a: int, b: int) -> int\\nAdd two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'int'}, 'b': {'type': 'int'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: True, definition: FunctionDefinition(func_name='divide', func_desc='divide(a: float, b: float) -> float\\nDivide two numbers.', func_parameters={'type': 'object', 'properties': {'a': {'type': 'float'}, 'b': {'type': 'float'}}, 'required': ['a', 'b']})), FunctionTool(fn: , async: True, definition: FunctionDefinition(func_name='search', func_desc='search(query: 
str) -> List[str]\\nSearch for query and return a list of results.', func_parameters={'type': 'object', 'properties': {'query': {'type': 'str'}}, 'required': ['query']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='numpy_sum', func_desc='numpy_sum(arr: numpy.ndarray) -> float\\nSum the elements of an array.', func_parameters={'type': 'object', 'properties': {'arr': {'type': 'ndarray'}}, 'required': ['arr']})), FunctionTool(fn: , async: False, definition: FunctionDefinition(func_name='add_points', func_desc='add_points(p1: __main__.Point, p2: __main__.Point) -> __main__.Point\\nNone', func_parameters={'type': 'object', 'properties': {'p1': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}, 'p2': {'type': \"{'type': 'Point', 'properties': {'x': {'type': 'int'}, 'y': {'type': 'int'}}, 'required': ['x', 'y']}\"}}, 'required': ['p1', 'p2']}))], Additional Context: {})\n" ] } + ], + "source": [ + "from adalflow.core.tool_manager import ToolManager\n", + "\n", + "tool_manager = ToolManager(tools=functions)\n", + "print(tool_manager)" ] }, { "cell_type": "markdown", - "source": [ - "## Function Call end-to-end" - ], "metadata": { "id": "9Bw2fs--UKX7" - } + }, + "source": [ + "## Function Call end-to-end" + ] }, { "cell_type": "code", + "execution_count": 11, + "metadata": { + "id": "TywPQMIVUOqh" + }, + "outputs": [], "source": [ "template = r\"\"\"You have these tools available:\n", "{% if tools %}\n", @@ -323,24 +325,11 @@ "User: {{input_str}}\n", "You:\n", "\"\"\"" - ], - "metadata": { - "id": "TywPQMIVUOqh" - }, - "execution_count": 11, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "from adalflow.core.prompt_builder import Prompt\n", - "\n", - "prompt = Prompt(template=template)\n", - "small_tool_manager = ToolManager(tools=tools[:2])\n", - "\n", - "renered_prompt = prompt(tools=small_tool_manager.yaml_definitions)\n", - "print(renered_prompt)" - ], + "execution_count": 12, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -348,11 +337,10 @@ "id": "-vMajeXoUQ5A", "outputId": "ca68601b-e9c8-41c3-a6fa-777f225e68e3" }, - "execution_count": 12, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "You have these tools available:\n", "\n", @@ -398,19 +386,20 @@ "\n" ] } - ] - }, - { - "cell_type": "code", + ], "source": [ - "from adalflow.core.types import Function\n", + "from adalflow.core.prompt_builder import Prompt\n", "\n", - "output_data_class = Function\n", - "output_format_str = output_data_class.to_json_signature(exclude=[\"thought\", \"args\"])\n", + "prompt = Prompt(template=template)\n", + "small_tool_manager = ToolManager(tools=tools[:2])\n", "\n", - "renered_prompt = prompt(output_format_str=output_format_str)\n", + "renered_prompt = prompt(tools=small_tool_manager.yaml_definitions)\n", "print(renered_prompt)" - ], + ] + }, + { + "cell_type": "code", + "execution_count": 13, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -418,11 +407,10 @@ "id": "V9-90IFRUUNT", "outputId": "ed2f829e-c656-43c6-a454-8a7c32d5dafe" }, - "execution_count": 13, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "You have these tools available:\n", "\n", @@ -437,17 +425,20 @@ "\n" ] } + ], + "source": [ + "from adalflow.core.types import Function\n", + "\n", + "output_data_class = Function\n", + "output_format_str = output_data_class.to_json_signature(exclude=[\"thought\", 
\"args\"])\n", + "\n", + "renered_prompt = prompt(output_format_str=output_format_str)\n", + "print(renered_prompt)" ] }, { "cell_type": "code", - "source": [ - "from adalflow.core.types import FunctionExpression\n", - "\n", - "output_data_class = FunctionExpression\n", - "output_format_str = output_data_class.to_json_signature(exclude=[\"thought\"])\n", - "print(prompt(output_format_str=output_format_str))" - ], + "execution_count": 14, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -455,11 +446,10 @@ "id": "p3kPMhWaUYT1", "outputId": "a3de7117-c3eb-404e-e2e7-8a5187b32f6b" }, - "execution_count": 14, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "You have these tools available:\n", "\n", @@ -473,17 +463,18 @@ "\n" ] } + ], + "source": [ + "from adalflow.core.types import FunctionExpression\n", + "\n", + "output_data_class = FunctionExpression\n", + "output_format_str = output_data_class.to_json_signature(exclude=[\"thought\"])\n", + "print(prompt(output_format_str=output_format_str))" ] }, { "cell_type": "code", - "source": [ - "from adalflow.components.output_parsers import JsonOutputParser\n", - "\n", - "func_parser = JsonOutputParser(data_class=Function, exclude_fields=[\"thought\", \"args\"])\n", - "instructions = func_parser.format_instructions()\n", - "print(instructions)" - ], + "execution_count": 17, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -491,11 +482,10 @@ "id": "MvGyoUmMUatR", "outputId": "e819866b-f6e3-4c88-f9f1-22d725a28865" }, - "execution_count": 17, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Your output should be formatted as a standard JSON instance with the following schema:\n", "```\n", @@ -510,19 +500,31 @@ "-Follow the JSON formatting conventions.\n" ] } + ], + "source": [ + "from adalflow.components.output_parsers import JsonOutputParser\n", + "\n", + "func_parser = JsonOutputParser(data_class=Function, exclude_fields=[\"thought\", \"args\"])\n", + "instructions = func_parser.format_instructions()\n", + "print(instructions)" ] }, { "cell_type": "markdown", - "source": [ - "## Function Output Format" - ], "metadata": { "id": "9W7DiGcpUme5" - } + }, + "source": [ + "## Function Output Format" + ] }, { "cell_type": "code", + "execution_count": 20, + "metadata": { + "id": "z5tNhoruUp6o" + }, + "outputs": [], "source": [ "from adalflow.core.generator import Generator\n", "from adalflow.core.types import ModelClientType\n", @@ -539,42 +541,11 @@ " prompt_kwargs=prompt_kwargs,\n", " output_processors=func_parser,\n", ")" - ], - "metadata": { - "id": "z5tNhoruUp6o" - }, - "execution_count": 20, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "queries = [\n", - " \"add 2 and 3\",\n", - " \"search for something\",\n", - " \"add points (1, 2) and (3, 4)\",\n", - " \"sum numpy array with arr = np.array([[1, 2], [3, 4]])\",\n", - " \"multiply 2 with local variable x\",\n", - " \"divide 2 by 3\",\n", - " \"Add 5 to variable y\",\n", - "]\n", - "\n", - "for idx, query in enumerate(queries):\n", - " prompt_kwargs = {\"input_str\": query}\n", - " print(f\"\\n{idx} Query: {query}\")\n", - " print(f\"{'-'*50}\")\n", - " try:\n", - " result = generator(prompt_kwargs=prompt_kwargs)\n", - " # print(f\"LLM raw output: {result.raw_response}\")\n", - " func = Function.from_dict(result.data)\n", - " print(f\"Function: {func}\")\n", - " func_output = tool_manager.execute_func(func)\n", - " print(f\"Function output: 
{func_output}\")\n", - " except Exception as e:\n", - " print(\n", - " f\"Failed to execute the function for query: {query}, func: {result.data}, error: {e}\"\n", - " )" - ], + "execution_count": 21, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -582,11 +553,10 @@ "id": "9DCukn1SUs_x", "outputId": "dcfd952c-0699-4d79-ee6d-a59373e3c75d" }, - "execution_count": 21, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "\n", "0 Query: add 2 and 3\n", @@ -604,15 +574,15 @@ ] }, { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "ERROR:adalflow.core.func_tool:Error at calling : 'dict' object has no attribute 'x'\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Function: Function(thought=None, name='add_points', args=[], kwargs={'p1': {'x': 1, 'y': 2}, 'p2': {'x': 3, 'y': 4}})\n", "Function output: FunctionOutput(name='add_points', input=Function(thought=None, name='add_points', args=(), kwargs={'p1': {'x': 1, 'y': 2}, 'p2': {'x': 3, 'y': 4}}), parsed_input=None, output=None, error=\"'dict' object has no attribute 'x'\")\n", @@ -638,62 +608,94 @@ ] }, { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "ERROR:adalflow.core.func_tool:Error at calling : unsupported operand type(s) for +: 'int' and 'str'\n" ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Function output: FunctionOutput(name='add', input=Function(thought=None, name='add', args=(), kwargs={'a': 5, 'b': 'y'}), parsed_input=None, output=None, error=\"unsupported operand type(s) for +: 'int' and 'str'\")\n" ] } + ], + "source": [ + "queries = [\n", + " \"add 2 and 3\",\n", + " \"search for something\",\n", + " \"add points (1, 2) and (3, 4)\",\n", + " \"sum numpy array with arr = np.array([[1, 2], [3, 4]])\",\n", + " \"multiply 2 with local variable x\",\n", + " \"divide 2 by 3\",\n", + " \"Add 5 to variable y\",\n", + "]\n", + "\n", + "for idx, query in enumerate(queries):\n", + " prompt_kwargs = {\"input_str\": query}\n", + " print(f\"\\n{idx} Query: {query}\")\n", + " print(f\"{'-'*50}\")\n", + " try:\n", + " result = generator(prompt_kwargs=prompt_kwargs)\n", + " # print(f\"LLM raw output: {result.raw_response}\")\n", + " func = Function.from_dict(result.data)\n", + " print(f\"Function: {func}\")\n", + " func_output = tool_manager.execute_func(func)\n", + " print(f\"Function output: {func_output}\")\n", + " except Exception as e:\n", + " print(\n", + " f\"Failed to execute the function for query: {query}, func: {result.data}, error: {e}\"\n", + " )" ] }, { "cell_type": "markdown", - "source": [ - "## FunctionExpression Output Format" - ], "metadata": { "id": "O-sBTPATUwsD" - } + }, + "source": [ + "## FunctionExpression Output Format" + ] }, { "cell_type": "code", + "execution_count": 22, + "metadata": { + "id": "TVRZ44N1UyWg" + }, + "outputs": [], "source": [ "tool_manager = ToolManager(\n", " tools=functions,\n", " additional_context={\"x\": x, \"y\": 0, \"np.array\": np.array, \"np\": np},\n", ")\n", "func_parser = JsonOutputParser(data_class=FunctionExpression)" - ], - "metadata": { - "id": "TVRZ44N1UyWg" - }, - "execution_count": 22, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 23, + "metadata": { + "id": "9h47p4XpU2BC" + }, + "outputs": [], "source": [ "context = r\"\"\"\n", "Your function expression also have access to these context:\n", "{{context_str}}\n", "\n", "\"\"\"" - ], - "metadata": { 
- "id": "9h47p4XpU2BC" - }, - "execution_count": 23, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 24, + "metadata": { + "id": "n9Qq7wcOU4X9" + }, + "outputs": [], "source": [ "async def run_async_function_call(self, generator, tool_manager):\n", " answers = []\n", @@ -725,12 +727,21 @@ " f\"Failed to execute the function for query: {query}, func: {result.data}, error: {e}\"\n", " )\n", " return None" - ], - "metadata": { - "id": "n9Qq7wcOU4X9" - }, - "execution_count": 24, - "outputs": [] + ] + } + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" } - ] + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/notebooks/tutorials/adalflow_logger.ipynb b/notebooks/tutorials/adalflow_logger.ipynb index ae5a7d83b..64fd01147 100644 --- a/notebooks/tutorials/adalflow_logger.ipynb +++ b/notebooks/tutorials/adalflow_logger.ipynb @@ -1,21 +1,10 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { "cell_type": "markdown", + "metadata": { + "id": "lLGpv1fLLIjF" + }, "source": [ "# Adalflow RAG Playbook example\n", "\n", @@ -26,10 +15,7 @@ "- RAG with dynamic data access and caching the embedding dynamically in a local storage.\n", "\n", "Here we will have have a look at an example with a local DB using FAISS" - ], - "metadata": { - "id": "lLGpv1fLLIjF" - } + ] }, { "cell_type": "code", @@ -48,20 +34,18 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", - "\n", - "# Set environment variables\n", - "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", - "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ], + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -69,21 +53,37 @@ "id": "-4c_AGBt3PlR", "outputId": "275b050a-ce64-4b40-a5f9-4ccc12d92add" }, - "execution_count": 2, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Please enter your OpenAI API key: ··········\n", "Please enter your GROQ API key: ··········\n", "API keys have been set.\n" ] } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", + "\n", + "# Set environment variables\n", + "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", + "\n", + "print(\"API keys have been set.\")" ] }, { "cell_type": "markdown", + "metadata": { + "id": "4NztjiLR_EQE" + }, "source": [ "## Design\n", "\n", @@ -96,45 +96,38 @@ "2. 
Additionally, as we can’t always control the outputs of generators, we will provide customized logger and tracers(drop-in decorators) for them, for which we will explain in Tracing. This will not break the first objective.\n", "\n", "In the future, when we have more complex requirements from users, we will consider adding hooks/callbacks but we will do it in a way to keep the functional and user-facing APIs clean." - ], - "metadata": { - "id": "4NztjiLR_EQE" - } + ] }, { "cell_type": "code", + "execution_count": 3, + "metadata": { + "id": "d2H1vYoC_F-g" + }, + "outputs": [], "source": [ "import logging\n", "\n", "log = logging.getLogger(__name__)" - ], - "metadata": { - "id": "d2H1vYoC_F-g" - }, - "execution_count": 3, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "e2GxAapG_TJH" + }, + "outputs": [], "source": [ "from adalflow.utils.logger import get_logger\n", "\n", "\n", "root_logger = get_logger()" - ], - "metadata": { - "id": "e2GxAapG_TJH" - }, - "execution_count": 4, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "from adalflow.utils.logger import printc\n", - "\n", - "printc(\"All logging examples are done. Feeling green!\", color=\"green\")" - ], + "execution_count": 5, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -142,30 +135,39 @@ "id": "Yk4oiBFE_asG", "outputId": "470e30dc-1b31-40c1-9e48-30754ae54b45" }, - "execution_count": 5, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "\u001b[32m2024-11-28 13:39:41 - [:3:] - All logging examples are done. Feeling green!\u001b[0m\n" ] } + ], + "source": [ + "from adalflow.utils.logger import printc\n", + "\n", + "printc(\"All logging examples are done. Feeling green!\", color=\"green\")" ] }, { "cell_type": "markdown", + "metadata": { + "id": "B8lmlT_9_nVP" + }, "source": [ "Set up all logs in one file\n", "\n", "Assume your source code is at src/task.py. 
You can log simply by:" - ], - "metadata": { - "id": "B8lmlT_9_nVP" - } + ] }, { "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "o_Ru1myM_c-J" + }, + "outputs": [], "source": [ "import logging\n", "\n", @@ -175,26 +177,11 @@ "class Task:\n", " def __init__(self):\n", " log.info(\"This is a user program child logger\")" - ], - "metadata": { - "id": "o_Ru1myM_c-J" - }, - "execution_count": 6, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "import logging\n", - "from adalflow.utils.logger import get_logger\n", - "\n", - "root_logger = get_logger(level=\"DEBUG\", save_dir=\"./logs\") # log to ./logs/lib.log\n", - "\n", - "# run code from the library components such as generator\n", - "# ....\n", - "\n", - "root_logger.info(\"This is the log in the main file\")" - ], + "execution_count": 7, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -202,28 +189,43 @@ "id": "o7YPjEZk_ehg", "outputId": "ad0f58e9-6f5c-4d00-e737-2fa1ad5ebd85" }, - "execution_count": 7, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "2024-11-28 13:39:46 - - INFO - [:9:] - This is the log in the main file\n" ] } + ], + "source": [ + "import logging\n", + "from adalflow.utils.logger import get_logger\n", + "\n", + "root_logger = get_logger(level=\"DEBUG\", save_dir=\"./logs\") # log to ./logs/lib.log\n", + "\n", + "# run code from the library components such as generator\n", + "# ....\n", + "\n", + "root_logger.info(\"This is the log in the main file\")" ] }, { "cell_type": "markdown", - "source": [ - "Separate library and application logs" - ], "metadata": { "id": "Db1_Ob3X_gpe" - } + }, + "source": [ + "Separate library and application logs" + ] }, { "cell_type": "code", + "execution_count": 8, + "metadata": { + "id": "rQWuFnUc_gNm" + }, + "outputs": [], "source": [ "from adalflow.utils.logger import get_logger\n", "\n", @@ -235,12 +237,21 @@ "class Task:\n", " def __init__(self):\n", " app_logger.info(\"This is a user program child logger\")" - ], - "metadata": { - "id": "rQWuFnUc_gNm" - }, - "execution_count": 8, - "outputs": [] + ] } - ] + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/notebooks/tutorials/adalflow_modelclient.ipynb b/notebooks/tutorials/adalflow_modelclient.ipynb index 1674c69af..05ff5f3d0 100644 --- a/notebooks/tutorials/adalflow_modelclient.ipynb +++ b/notebooks/tutorials/adalflow_modelclient.ipynb @@ -87,6 +87,17 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "markdown", "metadata": { @@ -2032,6 +2043,283 @@ "build_custom_model_client()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Adalflow multimodal model client" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def analyze_single_image():\n", + " \"\"\"Example of analyzing a single image with GPT-4 Vision\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"gpt-4o-mini\",\n", + " \"images\": 
\"https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/images/happy_cat.jpg\",\n", + " \"max_tokens\": 300,\n", + " },\n", + " )\n", + "\n", + " response = gen(\n", + " {\"input_str\": \"What do you see in this image? Be detailed but concise.\"}\n", + " )\n", + " print(\"\\n=== Single Image Analysis ===\")\n", + " print(f\"Description: {response.raw_response}\")\n", + "\n", + "\n", + "def analyze_multiple_images():\n", + " \"\"\"Example of analyzing multiple images in one prompt\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " # List of images to analyze together\n", + " images = [\n", + " \"https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/images/happy_cat.jpg\",\n", + " \"https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/images/sad_cat.jpg\",\n", + " ]\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\"model\": \"gpt-4o-mini\", \"images\": images, \"max_tokens\": 300},\n", + " )\n", + "\n", + " response = gen(\n", + " {\n", + " \"input_str\": \"Compare and contrast these two images. What are the main differences?\"\n", + " }\n", + " )\n", + " print(\"\\n=== Multiple Images Analysis ===\")\n", + " print(f\"Comparison: {response.raw_response}\")\n", + "\n", + "\n", + "def generate_art_with_dalle():\n", + " \"\"\"Example of generating art using DALL-E 3\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"dall-e-3\",\n", + " \"size\": \"1024x1024\",\n", + " \"quality\": \"standard\",\n", + " \"n\": 1,\n", + " },\n", + " )\n", + "\n", + " response = gen(\n", + " {\n", + " \"input_str\": \"A serene Japanese garden with a small bridge over a koi pond, cherry blossoms falling gently in the breeze\"\n", + " }\n", + " )\n", + " print(\"\\n=== Art Generation with DALL-E 3 ===\")\n", + " print(f\"Generated Image URL: {response.data}\")\n", + "\n", + "\n", + "def create_image_variations(image_path=\"path/to/your/image.jpg\"):\n", + " \"\"\"Example of creating variations of an existing image\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"dall-e-2\",\n", + " \"image\": image_path,\n", + " \"n\": 2, # Generate 2 variations\n", + " \"size\": \"1024x1024\",\n", + " },\n", + " )\n", + "\n", + " response = gen({\"input_str\": \"\"})\n", + " print(\"\\n=== Image Variations ===\")\n", + " print(f\"Variation URLs: {response.data}\")\n", + "\n", + "\n", + "def edit_image_with_mask(image_path=\"path/to/image.jpg\", mask_path=\"path/to/mask.jpg\"):\n", + " \"\"\"Example of editing specific parts of an image using a mask\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"dall-e-2\",\n", + " \"image\": image_path,\n", + " \"mask\": mask_path,\n", + " \"n\": 1,\n", + " \"size\": \"1024x1024\",\n", + " },\n", + " )\n", + "\n", + " response = gen({\"input_str\": \"Replace the masked area with a beautiful sunset\"})\n", + " print(\"\\n=== Image Editing ===\")\n", + " print(f\"Edited Image URL: {response.data}\")\n", + "\n", + "\n", + "def mixed_image_text_conversation():\n", + " \"\"\"Example of having a conversation that includes both images and text\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"gpt-4o-mini\",\n", + " \"images\": [\n", + " 
\"https://raw.githubusercontent.com/openai/openai-cookbook/main/examples/images/happy_cat.jpg\",\n", + " \"https://path/to/local/image.jpg\", # Replace with your local image path\n", + " ],\n", + " \"max_tokens\": 300,\n", + " },\n", + " )\n", + "\n", + " conversation = \"\"\"You are a helpful assistant skilled in analyzing images and providing detailed descriptions.\n", + " I'm showing you two images. Please analyze them and tell me what emotions they convey.\"\"\"\n", + "\n", + " response = gen({\"input_str\": conversation})\n", + " print(\"\\n=== Mixed Image-Text Conversation ===\")\n", + " print(f\"Assistant's Analysis: {response.raw_response}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " print(\"OpenAI Image Processing Examples\\n\")\n", + "\n", + " # Basic image analysis\n", + " analyze_single_image()\n", + "\n", + " # Multiple image analysis\n", + " analyze_multiple_images()\n", + "\n", + " # Image generation\n", + " generate_art_with_dalle()\n", + "\n", + " # create_image_variations()\n", + " # edit_image_with_mask(, )\n", + " # mixed_image_text_conversation()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Image generation with Dall E and image understanding" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from adalflow.core import Generator\n", + "from adalflow.components.model_client.openai_client import OpenAIClient\n", + "from adalflow.core.types import ModelType" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class ImageGenerator(Generator):\n", + " \"\"\"Generator subclass for image generation.\"\"\"\n", + "\n", + " model_type = ModelType.IMAGE_GENERATION\n", + "\n", + "\n", + "def test_vision_and_generation():\n", + " \"\"\"Test both vision analysis and image generation\"\"\"\n", + " client = OpenAIClient()\n", + "\n", + " # 1. Test Vision Analysis\n", + " vision_gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"gpt-4o-mini\",\n", + " \"images\": \"https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png\",\n", + " \"max_tokens\": 300,\n", + " },\n", + " )\n", + "\n", + " vision_response = vision_gen(\n", + " {\"input_str\": \"What do you see in this image? Be detailed but concise.\"}\n", + " )\n", + " print(\"\\n=== Vision Analysis ===\")\n", + " print(f\"Description: {vision_response.raw_response}\")\n", + "\n", + " # 2. Test DALL-E Image Generation\n", + " dalle_gen = ImageGenerator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"dall-e-3\",\n", + " \"size\": \"1024x1024\",\n", + " \"quality\": \"standard\",\n", + " \"n\": 1,\n", + " },\n", + " )\n", + "\n", + " # For image generation, input_str becomes the prompt\n", + " response = dalle_gen(\n", + " {\"input_str\": \"A happy siamese cat playing with a red ball of yarn\"}\n", + " )\n", + " print(\"\\n=== DALL-E Generation ===\")\n", + " print(f\"Generated Image URL: {response.data}\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Invalid image url - Generator output still works!" 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def test_invalid_image_url():\n", + " \"\"\"Test Generator output with invalid image URL\"\"\"\n", + " client = OpenAIClient()\n", + " gen = Generator(\n", + " model_client=client,\n", + " model_kwargs={\n", + " \"model\": \"gpt-4o-mini\",\n", + " \"images\": \"https://invalid.url/nonexistent.jpg\",\n", + " \"max_tokens\": 300,\n", + " },\n", + " )\n", + "\n", + " print(\"\\n=== Testing Invalid Image URL ===\")\n", + " response = gen({\"input_str\": \"What do you see in this image?\"})\n", + " print(f\"Response with invalid image URL: {response}\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if __name__ == \"__main__\":\n", + " print(\"Starting OpenAI Vision and DALL-E test...\\n\")\n", + " test_invalid_image_url()\n", + " test_vision_and_generation()" + ] + }, { "cell_type": "markdown", "metadata": { diff --git a/notebooks/tutorials/adalflow_rag_optimization.ipynb b/notebooks/tutorials/adalflow_rag_optimization.ipynb index 34d208bfa..2dbcc579e 100644 --- a/notebooks/tutorials/adalflow_rag_optimization.ipynb +++ b/notebooks/tutorials/adalflow_rag_optimization.ipynb @@ -1,21 +1,10 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { "cell_type": "markdown", + "metadata": { + "id": "xHF95Kr4CzGq" + }, "source": [ "# 🤗 Welcome to AdalFlow!\n", "## The PyTorch library to auto-optimize any LLM task pipelines\n", @@ -44,13 +33,13 @@ "- Build the standard RAG with Retriever and Generator components.\n", "\n", "- Learn how to connect the output-input between components to enable auto-text-grad optimization." - ], - "metadata": { - "id": "xHF95Kr4CzGq" - } + ] }, { "cell_type": "markdown", + "metadata": { + "id": "Kof5M6DRaKhh" + }, "source": [ "\n", "# Installation\n", @@ -63,10 +52,7 @@ "2. Setup `openai` and `groq` API key in the environment variables\n", "\n", "You can choose to use different client. You can import the model client you prefer. We support `Anthropic`, `Cohere`, `Google`, `GROQ`, `OpenAI`, `Transformer` and more in development. 
We will use OpenAI here as an example.Please refer to our [full installation guide](https://adalflow.sylph.ai/get_started/installation.html)" - ], - "metadata": { - "id": "Kof5M6DRaKhh" - } + ] }, { "cell_type": "code", @@ -84,8 +70,22 @@ "clear_output()" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, { "cell_type": "markdown", + "metadata": { + "id": "KapUyHMM07pJ" + }, "source": [ "## Set Environment Variables\n", "\n", @@ -94,27 +94,11 @@ "Note: for normal `.py` projects, follow our [official installation guide](https://lightrag.sylph.ai/get_started/installation.html).\n", "\n", "*Go to [OpenAI](https://platform.openai.com/docs/introduction) to get API keys if you don't already have.*" - ], - "metadata": { - "id": "KapUyHMM07pJ" - } + ] }, { "cell_type": "code", - "source": [ - "import os\n", - "\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "\n", - "\n", - "# Set environment variables\n", - "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ], + "execution_count": 3, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -122,20 +106,38 @@ "id": "ONfzF9Puzdd_", "outputId": "5fc0cd30-9ae7-443a-c06c-31e9edeafd69" }, - "execution_count": 3, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Please enter your OpenAI API key: ··········\n", "API keys have been set.\n" ] } + ], + "source": [ + "import os\n", + "\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "\n", + "\n", + "# Set environment variables\n", + "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "\n", + "print(\"API keys have been set.\")" ] }, { "cell_type": "code", + "execution_count": 20, + "metadata": { + "id": "aE3I05BqOmd7" + }, + "outputs": [], "source": [ "import dspy\n", "import re\n", @@ -150,15 +152,15 @@ "from adalflow.core.retriever import Retriever\n", "from adalflow.core.component import fun_to_component\n", "from adalflow.components.model_client.openai_client import OpenAIClient" - ], - "metadata": { - "id": "aE3I05BqOmd7" - }, - "execution_count": 20, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "cqUUoua9fUxQ" + }, + "outputs": [], "source": [ "gpt_4o_model = {\n", " \"model_client\": OpenAIClient(),\n", @@ -175,15 +177,37 @@ " \"max_tokens\": 2000,\n", " },\n", "}" - ], - "metadata": { - "id": "cqUUoua9fUxQ" - }, - "execution_count": null, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 22, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "0irHeHUkOmL8", + "outputId": "61f778a2-9ec1-4fda-daa2-bcc7f31baa78" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "HotPotQAData(id='5a8b57f25542995d1e6f1371', question='Were Scott Derrickson and Ed Wood of the same nationality?', answer='yes', gold_titles=\"{'Scott Derrickson', 'Ed Wood'}\") \n" + ] + }, + { + "data": { + "text/plain": [ + "HotPotQAData(id='5a8b57f25542995d1e6f1371', question='Were Scott Derrickson and Ed Wood of the same nationality?', answer='yes', gold_titles=\"{'Scott 
Derrickson', 'Ed Wood'}\")" + ] + }, + "execution_count": 22, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "def load_datasets():\n", "\n", @@ -215,37 +239,15 @@ " answer=\"yes\",\n", " gold_titles=\"{'Scott Derrickson', 'Ed Wood'}\",\n", ")" - ], - "metadata": { - "colab": { - "base_uri": "https://localhost:8080/" - }, - "id": "0irHeHUkOmL8", - "outputId": "61f778a2-9ec1-4fda-daa2-bcc7f31baa78" - }, - "execution_count": 22, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "HotPotQAData(id='5a8b57f25542995d1e6f1371', question='Were Scott Derrickson and Ed Wood of the same nationality?', answer='yes', gold_titles=\"{'Scott Derrickson', 'Ed Wood'}\") \n" - ] - }, - { - "output_type": "execute_result", - "data": { - "text/plain": [ - "HotPotQAData(id='5a8b57f25542995d1e6f1371', question='Were Scott Derrickson and Ed Wood of the same nationality?', answer='yes', gold_titles=\"{'Scott Derrickson', 'Ed Wood'}\")" - ] - }, - "metadata": {}, - "execution_count": 22 - } ] }, { "cell_type": "code", + "execution_count": 23, + "metadata": { + "id": "ZZIEtZYHNVjo" + }, + "outputs": [], "source": [ "class DspyRetriever(adal.Retriever):\n", " def __init__(self, top_k: int = 3):\n", @@ -474,25 +476,34 @@ " trainer.diagnose(dataset=trainset, split=\"train\")\n", " # trainer.diagnose(dataset=valset, split=\"val\")\n", " # trainer.diagnose(dataset=testset, split=\"test\")" - ], - "metadata": { - "id": "ZZIEtZYHNVjo" - }, - "execution_count": 23, - "outputs": [] + ] }, { "cell_type": "markdown", + "metadata": { + "id": "AmkbyxmuruUu" + }, "source": [ "# Issues and feedback\n", "\n", "If you encounter any issues, please report them here: [GitHub Issues](https://github.com/SylphAI-Inc/LightRAG/issues).\n", "\n", "For feedback, you can use either the [GitHub discussions](https://github.com/SylphAI-Inc/LightRAG/discussions) or [Discord](https://discord.gg/ezzszrRZvT)." 
- ], - "metadata": { - "id": "AmkbyxmuruUu" - } + ] } - ] + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/notebooks/tutorials/adalflow_rag_playbook.ipynb b/notebooks/tutorials/adalflow_rag_playbook.ipynb index 308ade6e4..d44554748 100644 --- a/notebooks/tutorials/adalflow_rag_playbook.ipynb +++ b/notebooks/tutorials/adalflow_rag_playbook.ipynb @@ -1,21 +1,10 @@ { - "nbformat": 4, - "nbformat_minor": 0, - "metadata": { - "colab": { - "provenance": [] - }, - "kernelspec": { - "name": "python3", - "display_name": "Python 3" - }, - "language_info": { - "name": "python" - } - }, "cells": [ { "cell_type": "markdown", + "metadata": { + "id": "lLGpv1fLLIjF" + }, "source": [ "# Adalflow RAG Playbook example\n", "\n", @@ -26,10 +15,7 @@ "- RAG with dynamic data access and caching the embedding dynamically in a local storage.\n", "\n", "Here we will have have a look at an example with a local DB using FAISS" - ], - "metadata": { - "id": "lLGpv1fLLIjF" - } + ] }, { "cell_type": "code", @@ -48,20 +34,18 @@ }, { "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ - "import os\n", - "from getpass import getpass\n", - "\n", - "# Prompt user to enter their API keys securely\n", - "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", - "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", - "\n", - "# Set environment variables\n", - "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", - "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", - "\n", - "print(\"API keys have been set.\")" - ], + "!pip uninstall httpx anyio -y\n", + "!pip install \"anyio>=3.1.0,<4.0\"\n", + "!pip install httpx==0.24.1" + ] + }, + { + "cell_type": "code", + "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -69,21 +53,39 @@ "id": "-4c_AGBt3PlR", "outputId": "a36f157b-0b18-4f3d-d5a8-09aa94743922" }, - "execution_count": 2, "outputs": [ { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Please enter your OpenAI API key: ··········\n", "Please enter your GROQ API key: ··········\n", "API keys have been set.\n" ] } + ], + "source": [ + "import os\n", + "from getpass import getpass\n", + "\n", + "# Prompt user to enter their API keys securely\n", + "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n", + "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n", + "\n", + "# Set environment variables\n", + "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n", + "\n", + "print(\"API keys have been set.\")" ] }, { "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "V9LsGDnm3RbV" + }, + "outputs": [], "source": [ "from typing import Any, List, Optional\n", "import os\n", @@ -99,15 +101,15 @@ " TextSplitter,\n", ")\n", "from adalflow.utils.global_config import get_adalflow_default_root_path" - ], - "metadata": { - "id": "V9LsGDnm3RbV" - }, - "execution_count": 4, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 5, + "metadata": { + "id": "kWGTZxrw3Tli" + }, + "outputs": [], "source": [ "configs = {\n", " \"embedder\": {\n", @@ -135,15 +137,15 @@ " \"chunk_overlap\": 200,\n", " },\n", "}" - ], - "metadata": { - "id": "kWGTZxrw3Tli" - }, - "execution_count": 5, - "outputs": [] + ] }, { "cell_type": "code", + 
"execution_count": 6, + "metadata": { + "id": "1QE0PCKs4BLz" + }, + "outputs": [], "source": [ "def prepare_data_pipeline():\n", " splitter = TextSplitter(**configs[\"text_splitter\"])\n", @@ -172,15 +174,15 @@ " data_transformer = prepare_data_pipeline()\n", " db.transform(data_transformer, key=\"data_transformer\")\n", " db.save_state(index_path)" - ], - "metadata": { - "id": "1QE0PCKs4BLz" - }, - "execution_count": 6, - "outputs": [] + ] }, { "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "6Mu1HXhy4DIG" + }, + "outputs": [], "source": [ "RAG_PROMPT_TEMPLATE = r\"\"\"\n", "{{task_desc}}\n", @@ -284,42 +286,11 @@ " print(f\"context_str: \\n {context_str}\")\n", "\n", " return self.generate(query, context=context_str)" - ], - "metadata": { - "id": "6Mu1HXhy4DIG" - }, - "execution_count": 7, - "outputs": [] + ] }, { "cell_type": "code", - "source": [ - "# Prepare initial documents\n", - "doc1 = Document(\n", - " meta_data={\"title\": \"Li Yin's profile\"},\n", - " text=\"My name is Li Yin, I love rock climbing\" + \"lots of nonsense text\" * 500,\n", - " id=\"doc1\",\n", - ")\n", - "doc2 = Document(\n", - " meta_data={\"title\": \"Interviewing Li Yin\"},\n", - " text=\"lots of more nonsense text\" * 250\n", - " + \"Li Yin is an AI researcher and a software engineer\"\n", - " + \"lots of more nonsense text\" * 250,\n", - " id=\"doc2\",\n", - ")\n", - "\n", - "# Prepare the database (only runs once)\n", - "prepare_database_with_index([doc1, doc2], index_file=\"index.faiss\")\n", - "\n", - "# Initialize RAG\n", - "rag = RAG(index_file=\"index.faiss\")\n", - "print(rag)\n", - "\n", - "# Query the RAG system\n", - "query = \"What is Li Yin's hobby and profession?\"\n", - "response = rag.call(query)\n", - "print(f\"Response: {response}\")" - ], + "execution_count": 8, "metadata": { "colab": { "base_uri": "https://localhost:8080/" @@ -327,11 +298,10 @@ "id": "sPnx4PY34D1j", "outputId": "f66d6f1a-70bf-40e9-a160-591fcfdcbed3" }, - "execution_count": 8, "outputs": [ { - "output_type": "stream", "name": "stderr", + "output_type": "stream", "text": [ "Splitting Documents in Batches: 100%|██████████| 1/1 [00:00<00:00, 109.58it/s]\n", "Batch embedding documents: 100%|██████████| 1/1 [00:01<00:00, 1.33s/it]\n", @@ -339,8 +309,8 @@ ] }, { - "output_type": "stream", "name": "stdout", + "output_type": "stream", "text": [ "Saved the state of the DB to /root/.adalflow/index.faiss\n", "RAG(\n", @@ -435,33 +405,38 @@ "Response: (GeneratorOutput(id=None, data={'answer': \"Li Yin's hobby is rock climbing and profession is an AI researcher and a software engineer.\"}, error=None, usage=CompletionUsage(completion_tokens=25, prompt_tokens=2713, total_tokens=2738), raw_response='{\\n \"answer\": \"Li Yin\\'s hobby is rock climbing and profession is an AI researcher and a software engineer.\"\\n}', metadata=None), ' My name is Li Yin, I love rock climbinglots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots 
of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of nonsense textlots of textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textLi Yin is an AI researcher and a software engineerlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more 
nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more textLi Yin is an AI researcher and a software engineerlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more nonsense textlots of more 
[… filler output elided: ~500 repetitions of "lots of nonsense text" / "lots of more nonsense text" …]')\n"
      ]
     }
-   ]
-  },
-  {
-   "cell_type": "code",
+   ],
    "source": [
-    "# Add more documents at runtime\n",
-    "doc3 = Document(\n",
-    "    meta_data={\"title\": \"Apple's profile\"},\n",
-    "    text=\"Apple is a cute dog with black and tan fur\" + \"lots of nonsense text\" * 500,\n",
-    "    id=\"doc3\",\n",
+    "# Prepare initial documents\n",
+    "doc1 = Document(\n",
+    "    meta_data={\"title\": \"Li Yin's profile\"},\n",
+    "    text=\"My name is Li Yin, I love rock climbing\" + \"lots of nonsense text\" * 500,\n",
+    "    id=\"doc1\",\n",
     ")\n",
-    "doc4 = Document(\n",
-    "    meta_data={\"title\": \"Apple's characteristics\"},\n",
+    "doc2 = Document(\n",
+    "    meta_data={\"title\": \"Interviewing Li Yin\"},\n",
     "    text=\"lots of more nonsense text\" * 250\n",
-    "    + \"Apple is energetic, loves to play with her monkey toy\"\n",
+    "    + \"Li Yin is an AI researcher and a software engineer\"\n",
     "    + \"lots of more nonsense text\" * 250,\n",
-    "    id=\"doc4\",\n",
+    "    id=\"doc2\",\n",
     ")\n",
     "\n",
-    "rag.add_documents([doc3, doc4])\n",
-    "rag.prepare_retriever()\n",
+    "# Prepare the database (only runs once)\n",
+    "prepare_database_with_index([doc1, doc2], index_file=\"index.faiss\")\n",
     "\n",
-    "# Test a new query\n",
-    "query = \"What is Apple's favorite toy?\"\n",
+    "# Initialize RAG\n",
+    "rag = RAG(index_file=\"index.faiss\")\n",
+    "print(rag)\n",
+    "\n",
+    "# Query the RAG system\n",
+    "query = \"What is Li Yin's hobby and profession?\"\n",
     "response = rag.call(query)\n",
     "print(f\"Response: {response}\")"
-   ],
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 9,
    "metadata": {
     "colab": {
      "base_uri": "https://localhost:8080/"
@@ -469,11 +444,10 @@
     "id": "bcC1-dCheVEC",
     "outputId": "133bab3f-ff2e-40db-99dc-71d64af6283f"
    },
-   "execution_count": 9,
    "outputs": [
     {
-     "output_type": "stream",
      "name": "stderr",
+     "output_type": "stream",
      "text": [
       "Splitting Documents in Batches: 100%|██████████| 1/1 [00:00<00:00, 114.76it/s]\n",
       "Batch embedding documents: 100%|██████████| 1/1 [00:00<00:00, 1.35it/s]\n",
@@ -481,25 +455,41 @@
      ]
     },
     {
-     "output_type": "stream",
      "name": "stdout",
+     "output_type": "stream",
      "text": [
       "Saved the state of the DB to /root/.adalflow/index.faiss\n",
       "Response: (GeneratorOutput(id=None, data={'answer': \"Apple's favorite toy is her monkey toy.\"}, error=None, usage=CompletionUsage(completion_tokens=16, prompt_tokens=2647, total_tokens=2663), raw_response='{\\n  \"answer\": \"Apple\\'s favorite toy is her monkey toy.\"\\n}', metadata=None), ' Apple is a cute dog with black and tan fur[… retrieved context elided: hundreds of repetitions of \"lots of nonsense text\" / \"lots of more nonsense text\" surrounding the fragments \"Apple is energetic, loves to play with her monkey toy\" and \"Li Yin is an AI researcher and a software engineer\" …]')\n"
      ]
     }
+   ],
+   "source": [
+    "# Add more documents at runtime\n",
+    "doc3 = Document(\n",
+    "    meta_data={\"title\": \"Apple's profile\"},\n",
+    "    text=\"Apple is a cute dog with black and tan fur\" + \"lots of nonsense text\" * 500,\n",
+    "    id=\"doc3\",\n",
+    ")\n",
+    "doc4 = Document(\n",
+    "    meta_data={\"title\": \"Apple's characteristics\"},\n",
+    "    text=\"lots of more nonsense text\" * 250\n",
+    "    + \"Apple is energetic, loves to play with her monkey toy\"\n",
+    "    + \"lots of more nonsense text\" * 250,\n",
+    "    id=\"doc4\",\n",
+    ")\n",
+    "\n",
+    "rag.add_documents([doc3, doc4])\n",
+    "rag.prepare_retriever()\n",
+    "\n",
+    "# Test a new query\n",
+    "query = \"What is Apple's favorite toy?\"\n",
+    "response = rag.call(query)\n",
+    "print(f\"Response: {response}\")"
   ]
  },
  {
   "cell_type": "code",
-   "source": [
-    "# View all documents in the database\n",
-    "print(\"All documents in the database:\")\n",
-    "for item in rag.db.items:\n",
-    "    print(\n",
-    "        f\"ID: {item.id}, Title: {item.meta_data['title']}, Text: {item.text[:100]}...\"\n",
-    "    )"
-   ],
+   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
@@ -507,11 +497,10 @@
    "id": "o9TzVv5GeZZ2",
    "outputId": "bde56355-186c-4013-d702-b4530f82881b"
   },
-  "execution_count": 10,
   "outputs": [
    {
-    "output_type": "stream",
     "name": "stdout",
+    "output_type": "stream",
     "text": [
      "All documents in the database:\n",
      "ID: doc1, Title: Li Yin's profile, Text: My name is Li Yin, I love rock climbinglots of nonsense text[…truncated…]\n",
@@ -520,7 +509,29 @@
      "ID: doc4, Title: Apple's characteristics, Text: lots of more nonsense text[…truncated…]\n"
     ]
    }
+  ],
+  "source": [
+   "# View all documents in the database\n",
+   "print(\"All documents in the database:\")\n",
+   "for item in rag.db.items:\n",
+   "    print(\n",
+   "        f\"ID: {item.id}, Title: {item.meta_data['title']}, Text: {item.text[:100]}...\"\n",
+   "    )"
   ]
  }
- ]
+ ],
+ "metadata": {
+  "colab": {
+   "provenance": []
+  },
+  "kernelspec": {
+   "display_name": "Python 3",
+   "name": "python3"
+  },
+  "language_info": {
+   "name": "python"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 0
 }
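The two cells above capture the update contract this notebook relies on: `rag.add_documents(...)` only appends to the underlying database, and the new documents are not searchable until `rag.prepare_retriever()` rebuilds the index. A minimal, self-contained sketch of that contract — `ToyRAG` and its keyword index are illustrative stand-ins for the notebook's FAISS-backed RAG class, not AdalFlow's API:

```python
from dataclasses import dataclass, field
from typing import Dict, List, Set, Tuple


@dataclass
class ToyRAG:
    """Toy illustration of the add_documents / prepare_retriever contract."""

    docs: Dict[str, str] = field(default_factory=dict)
    index: Dict[str, Set[str]] = field(default_factory=dict)  # token -> doc ids

    def add_documents(self, items: List[Tuple[str, str]]) -> None:
        # Only stores the documents; as in the notebook, retrieval stays
        # stale until prepare_retriever() is called again.
        for doc_id, text in items:
            self.docs[doc_id] = text

    def prepare_retriever(self) -> None:
        # Rebuild the index over *all* stored documents.
        self.index.clear()
        for doc_id, text in self.docs.items():
            for token in text.lower().split():
                self.index.setdefault(token, set()).add(doc_id)

    def call(self, query: str) -> List[str]:
        hits: Set[str] = set()
        for token in query.lower().split():
            hits |= self.index.get(token, set())
        return sorted(hits)


rag = ToyRAG()
rag.add_documents([("doc3", "apple is a cute dog"), ("doc4", "apple loves her monkey toy")])
print(rag.call("monkey toy"))  # [] -- nothing indexed yet
rag.prepare_retriever()
print(rag.call("monkey toy"))  # ['doc4']
```

Rebuilding from scratch keeps both the sketch and the notebook simple; an incremental index would avoid re-embedding documents that have not changed.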
diff --git a/notebooks/tutorials/adalflow_text_splitter.ipynb b/notebooks/tutorials/adalflow_text_splitter.ipynb
index 4008f45a9..7d5a05bdd 100644
--- a/notebooks/tutorials/adalflow_text_splitter.ipynb
+++ b/notebooks/tutorials/adalflow_text_splitter.ipynb
@@ -11,6 +11,17 @@
    "!pip install adalflow[openai,groq,faiss-cpu]"
   ]
  },
+ {
+  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
+  "source": [
+   "!pip uninstall httpx anyio -y\n",
+   "!pip install \"anyio>=3.1.0,<4.0\"\n",
+   "!pip install httpx==0.24.1"
+  ]
+ },
 {
  "cell_type": "code",
  "execution_count": null,
diff --git a/notebooks/tutorials/adalflow_tracing.ipynb b/notebooks/tutorials/adalflow_tracing.ipynb
index ef3d2b255..fbb6f9cea 100644
--- a/notebooks/tutorials/adalflow_tracing.ipynb
+++ b/notebooks/tutorials/adalflow_tracing.ipynb
@@ -1,31 +1,17 @@
 {
- "nbformat": 4,
- "nbformat_minor": 0,
- "metadata": {
-  "colab": {
-   "provenance": []
-  },
-  "kernelspec": {
-   "name": "python3",
-   "display_name": "Python 3"
-  },
-  "language_info": {
-   "name": "python"
-  }
- },
 "cells": [
  {
   "cell_type": "markdown",
+  "metadata": {
+   "id": "lLGpv1fLLIjF"
+  },
   "source": [
    "# Tracing\n",
    "\n",
    "In particular, we provide two tracing methods to help you develop and improve the Generator:\n",
    "\n",
    "1. Trace the history of changes (states) to the prompt during your development process. Developers typically go through a long process of prompt optimization, and it is frustrating to lose track of prompt changes when the latest change actually makes performance much worse.\n"
-  ],
-  "metadata": {
-   "id": "lLGpv1fLLIjF"
-  }
+  ]
 },
 {
  "cell_type": "code",
@@ -44,20 +30,18 @@
 },
 {
  "cell_type": "code",
+  "execution_count": null,
+  "metadata": {},
+  "outputs": [],
  "source": [
-   "import os\n",
-   "from getpass import getpass\n",
-   "\n",
-   "# Prompt user to enter their API keys securely\n",
-   "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n",
-   "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n",
-   "\n",
-   "# Set environment variables\n",
-   "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n",
-   "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n",
-   "\n",
-   "print(\"API keys have been set.\")"
-  ],
+   "!pip uninstall httpx anyio -y\n",
+   "!pip install \"anyio>=3.1.0,<4.0\"\n",
+   "!pip install httpx==0.24.1"
+  ]
+ },
+ {
+  "cell_type": "code",
+  "execution_count": 2,
  "metadata": {
   "colab": {
    "base_uri": "https://localhost:8080/"
   },
   "id": "-4c_AGBt3PlR",
   "outputId": "85aba038-ee9c-463d-bdbd-027cbfff0094"
  },
- "execution_count": 2,
 "outputs": [
  {
-  "output_type": "stream",
   "name": "stdout",
+  "output_type": "stream",
   "text": [
    "Please enter your OpenAI API key: ··········\n",
    "Please enter your GROQ API key: ··········\n",
    "API keys have been set.\n"
   ]
  }
+ ],
+ "source": [
+  "import os\n",
+  "from getpass import getpass\n",
+  "\n",
+  "# Prompt user to enter their API keys securely\n",
+  "openai_api_key = getpass(\"Please enter your OpenAI API key: \")\n",
+  "groq_api_key = getpass(\"Please enter your GROQ API key: \")\n",
+  "\n",
+  "# Set environment variables\n",
+  "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n",
+  "os.environ[\"GROQ_API_KEY\"] = groq_api_key\n",
+  "\n",
+  "print(\"API keys have been set.\")"
  ]
 },
 {
  "cell_type": "markdown",
-  "source": [
-   "We created a GeneratorStateLogger to handle the logging and saving into json files. To further simplify developers's process, we provides a class decorator trace_generator_states where a single line of code can be added to any of your task component. It will automatically track any attributes of type Generator."
-  ],
  "metadata": {
   "id": "yWi2uEiE6UIf"
-  }
+  },
+  "source": [
+   "We created a GeneratorStateLogger to handle logging and saving to JSON files. To simplify this further, we provide a class decorator, trace_generator_states: adding a single line of code to any of your task components will automatically track all attributes of type Generator."
+  ]
 },
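Since the screenshot further below only shows where the trace file lands on disk, here is a minimal sketch for inspecting such a file after the decorated component has run. The path and the assumption that the trace is plain JSON are illustrative placeholders, not AdalFlow's documented trace format:

```python
import json
from pathlib import Path


def show_prompt_history(trace_file: str) -> None:
    """Pretty-print a generator state trace so prompt versions can be diffed by eye."""
    data = json.loads(Path(trace_file).read_text())
    print(json.dumps(data, indent=2, ensure_ascii=False))


# Hypothetical path -- substitute the .json file written for your component:
# show_prompt_history("traces/DocQA/generator_state_trace.json")
```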
 {
  "cell_type": "code",
+  "execution_count": 13,
+  "metadata": {
+   "id": "qk9pkcCVzdek"
+  },
+  "outputs": [],
  "source": [
   "from adalflow.tracing import trace_generator_states\n",
   "from adalflow.core import Component, Generator\n",
@@ -110,33 +112,33 @@
   "\n",
   "    def call(self, query: str) -> str:\n",
   "        return self.doc(prompt_kwargs={\"input_str\": query}).data"
-  ],
-  "metadata": {
-   "id": "qk9pkcCVzdek"
-  },
-  "execution_count": 13,
-  "outputs": []
+  ]
 },
 {
  "cell_type": "markdown",
-  "source": [
-   "Here is the folder structer of where the trace is generated as a .json file and also an example output below"
-  ],
  "metadata": {
   "id": "LAZUSnYn-lnI"
-  }
+  },
+  "source": [
+   "Here is the folder structure where the trace is generated as a .json file, along with an example output below"
+  ]
 },
 {
  "cell_type": "markdown",
-  "source": [
-   "![image.png](data:image/png;base64,[… base64 PNG elided: screenshot of the generated trace folder structure …])"
-  ],
  "metadata": {
   "id": "cVofNXVW-EMo"
-  }
+  },
+  "source": [
"![image.png](data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAj4AAADGCAYAAADSbIrxAAAMTGlDQ1BJQ0MgUHJvZmlsZQAASImVVwdYU8kWnltSIQQIREBK6E0QkRJASggtgPQiiEpIAoQSY0JQsaOLCq5dRLCiqyAuuroCstiwK4ti74sFBWVdLNiVNyGALvvK9+b75s5//znzzzln5pYBgN7Ol0pzUE0AciV5sphgf9aEpGQWqROoAV3ABAZgFF8gl3KiosIBLIPt38vb6wBRtlcclFr/7P+vRUsokgsAQKIgThPKBbkQ/woA3iSQyvIAIEohbz49T6rEayHWkUEHIa5S4gwVblLiNBW+1G8TF8OF+DEAZHU+X5YBgEYP5Fn5ggyoQ4fRAieJUCyB2A9in9zcqUKI50NsA23gnHSlPjvtO52Mv2mmDWny+RlDWBVLfyEHiOXSHP7M/zMd/7vk5igG57CGVT1TFhKjjBnm7XH21DAlVof4vSQtIhJibQBQXCzst1diZqYiJF5lj9oI5FyYM7jOAB0nz4nlDfAxQn5AGMSGEKdLciLCB2wK08VBShuYP7RMnMeLg1gP4iqRPDB2wOaYbGrM4LzX02VczgDfyZf1+6DU/6rIjueo9DHtTBFvQB9zLMiMS4SYCnFAvjghAmINiCPk2bFhAzYpBZnciEEbmSJGGYsFxDKRJNhfpY+VpsuCYgbsd+fKB2PHjmWKeRED+HJeZlyIKlfYYwG/338YC9YjknDiB3VE8gnhg7EIRQGBqthxskgSH6vicT1pnn+MaixuJ82JGrDH/UU5wUreDOI4eX7s4Nj8PLg5Vfp4kTQvKk7lJ16exQ+NUvmD7wPhgAsCAAsoYE0DU0EWELd213fDO1VPEOADGcgAIuAwwAyOSOzvkcBrLCgAf0IkAvKhcf79vSKQD/kvw1glJx7iVFcHkD7Qp1TJBk8gzgVhIAfeK/qVJEMeJIDHkBH/wyM+rAIYQw6syv5/zw+y3xgOZMIHGMXgjCz6oCUxkBhADCEGEW1xA9wH98LD4dUPVmecjXsMxvHNnvCE0EZ4SLhGaCfcmiIulA3zcjxoh/pBA/lJ+z4/uBXUdMX9cW+oDpVxJm4AHHAXOA8H94Uzu0KWO+C3MiusYdp/i+C7FRqwozhRUMoIih/FZvhIDTsN1yEVZa6/z4/K17ShfHOHeobPz/0u+0LYhg23xJZgB7Az2HHsHNaE1QMWdhRrwFqww0o8tOMe9++4wdli+v3JhjrD98y3lVVmUu5U49Tl9FnVlyeakad8GLlTpTNl4ozMPBYHfjFELJ5E4DiK5ezk7AKA8vujer29ju7/riDMlm/cwj8A8D7a19f32zcu9CgAv7jDV8Khb5wNG35a1AA4e0igkOWrOFx5IcA3Bx0+ffrAGJgDGxiPM3ADXsAPBIJQEAniQBKYDL3PhPtcBqaD2WABKAIlYCVYB8rBFrAdVIGfwX5QD5rAcXAaXACXwDVwB+6eDvAc9IC34BOCICSEhjAQfcQEsUTsEWeEjfgggUg4EoMkIalIBiJBFMhsZCFSgqxGypFtSDXyC3IIOY6cQ9qQW8gDpAt5hXxEMVQd1UGNUCt0NMpGOWgYGodOQjPQaWgBughdjpahlegetA49jl5Ar6Ht6HO0FwOYGsbETDEHjI1xsUgsGUvHZNhcrBgrxSqxWqwRrvMVrB3rxj7gRJyBs3AHuIND8HhcgE/D5+LL8HK8Cq/DT+JX8Ad4D/6VQCMYEuwJngQeYQIhgzCdUEQoJewkHCScgs9SB+EtkUhkEq2J7vBZTCJmEWcRlxE3EfcSjxHbiI+IvSQSSZ9kT/ImRZL4pDxSEWkDaQ/pKOkyqYP0nqxGNiE7k4PIyWQJuZBcSt5NPkK+TH5K/kTRpFhSPCmRFCFlJmUFZQelkXKR0kH5RNWiWlO9qXHULOoCahm1lnqKepf6Wk1NzUzNQy1aTaw2X61MbZ/aWbUHah/UtdXt1LnqKeoK9eXqu9SPqd9Sf02j0axofrRkWh5tOa2adoJ2n/Zeg6HhqMHTEGrM06jQqNO4rPGCTqFb0jn0yfQCein9AP0ivVuTommlydXka87VrNA8pHlDs1eLoTVGK1IrV2uZ1m6tc1qd2iRtK+1AbaH2Iu3t2ie0HzEwhjmDyxAwFjJ2ME4xOnSIOtY6PJ0snRKdn3VadXp0tXVddBN0Z+hW6B7WbWdiTCsmj5nDXMHcz7zO/DjCaARnhGjE0hG1Iy6PeKc3Us9PT6RXrLdX75reR32WfqB+tv4q/Xr9ewa4gZ1BtMF0g80Gpwy6R+qM9BopGFk8cv/I24aooZ1hjOEsw+2GLYa9RsZGwUZSow1GJ4y6jZnGfsZZxmuNjxh3mTBMfEzEJmtNjpo8Y+myOKwcVhnrJKvH1NA0xFRhus201fSTmbVZvFmh2V6ze+ZUc7Z5uvla82bzHgsTi/EWsy1qLG5bUizZlpmW6y3PWL6zsrZKtFpsVW/Vaa1nzbMusK6xvmtDs/G1mWZTaXPVlmjLts223WR7yQ61c7XLtKuwu2iP2rvZi+032beNIozyGCUZVTnqhoO6A8ch36HG4YEj0zHcsdCx3vHFaIvRyaNXjT4z+quTq1OO0w6nO2O0x4SOKRzTOOaVs52zwLnC+epY2tigsfPGNox96WLvInLZ7HLTleE63nWxa7PrFzd3N5lbrVuXu4V7qvtG9xtsHXYUexn7rAfBw99jnkeTxwdPN888z/2ef3k5eGV77fbqHGc9TjRux7hH3mbefO9t3u0+LJ9Un60+7b6mvnzfSt+HfuZ+Qr+dfk85tpwszh7OC38nf5n/Qf93XE/uHO6xACwgOKA4oDVQOzA+sDzwfpBZUEZQTVBPsGvwrOBjIYSQsJBVITd4RjwBr5rXE+oeOif0ZJh6WGxYedjDcLtwWXjjeHR86Pg14+9GWEZIIuojQSQvck3kvSjrqGlRv0UTo6OiK6KfxIyJmR1zJpYROyV2d+zbOP+4FXF34m3iFfHNCfSElITqhHeJAYmrE9snjJ4wZ8KFJIMkcVJDMik5IXlncu/EwInrJnakuKYUpVyfZD1pxqRzkw0m50w+PIU+hT/lQCohNTF1d+pnfiS/kt+bxkvbmNYj4ArWC54L/YRrhV0ib9Fq0dN07/TV6Z0Z3hlrMroyfTNLM7vFXHG5+GVWSNaWrHfZkdm7svtyEnP25pJzU3MPSbQl2ZKTU42nzpjaJrWXFknbp3lOWzetRxYm2ylH5JPkDXk68Ee/RWGj+EHxIN8nvyL//fSE6QdmaM2QzGiZaTdz6cynBUEFP83CZwlmNc82nb1g9oM5nDnb5iJz0+Y2zzOft2hex/zg+VULqAuyF/xe6FS4uvDNwsSFjYuMFs1f9OiH4B9qijSKZEU3Fnst3rIEXyJe0rp07NINS78WC4vPlziVlJZ8XiZYdv7HMT+W/di3PH156wq3FZtXEldKVl5f5buqarXW6oLVj9aMX1O3lrW2eO2bdVPWnSt1Kd2ynrpesb69LLysYYPFhpUbPpd
nll+r8K/Yu9Fw49KN7zYJN13e7Le5dovRlpItH7eKt97cFrytrtKqsnQ7cXv+9ic7Enac+Yn9U/VOg50lO7/skuxqr4qpOlntXl2923D3ihq0RlHTtSdlz6WfA35uqHWo3baXubdkH9in2Pfsl9Rfru8P2998gH2g9lfLXzceZBwsrkPqZtb11GfWtzckNbQdCj3U3OjVePA3x992NZk2VRzWPbziCPXIoiN9RwuO9h6THus+nnH8UfOU5jsnJpy4ejL6ZOupsFNnTwedPnGGc+boWe+zTec8zx06zz5ff8HtQl2La8vB311/P9jq1lp30f1iwyWPS41t49qOXPa9fPxKwJXTV3lXL1yLuNZ2Pf76zRspN9pvCm923sq59fJ2/u1Pd+bfJdwtvqd5r/S+4f3KP2z/2Nvu1n74QcCDloexD+88Ejx6/lj++HPHoie0J6VPTZ5Wdzp3NnUFdV16NvFZx3Pp80/dRX9q/bnxhc2LX//y+6ulZ0JPx0vZy75Xy17rv971xuVNc29U7/23uW8/vSt+r/++6gP7w5mPiR+ffpr+mfS57Ivtl8avYV/v9uX29Un5Mn7/rwAGlEebdABe7QKAlgQAA54bqRNV58P+gqjOtP0I/CesOkP2FzcAauE/fXQ3/Lu5AcC+HQBYQX16CgBRNADiPAA6duxQHTzL9Z87lYUIzwZbI7+k5aaBf1NUZ9Lv/B7eAqWqCxje/gsy+IMtImMZLAAAAJZlWElmTU0AKgAAAAgABQESAAMAAAABAAEAAAEaAAUAAAABAAAASgEbAAUAAAABAAAAUgEoAAMAAAABAAIAAIdpAAQAAAABAAAAWgAAAAAAAACQAAAAAQAAAJAAAAABAAOShgAHAAAAEgAAAISgAgAEAAAAAQAAAj6gAwAEAAAAAQAAAMYAAAAAQVNDSUkAAABTY3JlZW5zaG90r8HhGAAAAAlwSFlzAAAWJQAAFiUBSVIk8AAAAttpVFh0WE1MOmNvbS5hZG9iZS54bXAAAAAAADx4OnhtcG1ldGEgeG1sbnM6eD0iYWRvYmU6bnM6bWV0YS8iIHg6eG1wdGs9IlhNUCBDb3JlIDYuMC4wIj4KICAgPHJkZjpSREYgeG1sbnM6cmRmPSJodHRwOi8vd3d3LnczLm9yZy8xOTk5LzAyLzIyLXJkZi1zeW50YXgtbnMjIj4KICAgICAgPHJkZjpEZXNjcmlwdGlvbiByZGY6YWJvdXQ9IiIKICAgICAgICAgICAgeG1sbnM6ZXhpZj0iaHR0cDovL25zLmFkb2JlLmNvbS9leGlmLzEuMC8iCiAgICAgICAgICAgIHhtbG5zOnRpZmY9Imh0dHA6Ly9ucy5hZG9iZS5jb20vdGlmZi8xLjAvIj4KICAgICAgICAgPGV4aWY6VXNlckNvbW1lbnQ+U2NyZWVuc2hvdDwvZXhpZjpVc2VyQ29tbWVudD4KICAgICAgICAgPGV4aWY6UGl4ZWxYRGltZW5zaW9uPjU3NDwvZXhpZjpQaXhlbFhEaW1lbnNpb24+CiAgICAgICAgIDxleGlmOlBpeGVsWURpbWVuc2lvbj4xOTg8L2V4aWY6UGl4ZWxZRGltZW5zaW9uPgogICAgICAgICA8dGlmZjpSZXNvbHV0aW9uVW5pdD4yPC90aWZmOlJlc29sdXRpb25Vbml0PgogICAgICAgICA8dGlmZjpYUmVzb2x1dGlvbj4xNDQvMTwvdGlmZjpYUmVzb2x1dGlvbj4KICAgICAgICAgPHRpZmY6WVJlc29sdXRpb24+MTQ0LzE8L3RpZmY6WVJlc29sdXRpb24+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgrknrQzAAA14ElEQVR4Ae2dB5gURfqHC8EEKCpiDijqKYpZxIQohsOcFfGMp+cJio9i4Mz55MR0KuZMMGBWFEyYzqyYsxjAiIpgRvnPW3++tra3Z3ZC727Pzu97nt1OVdXVb/dM/6a+r6pa9ejRY6aTiYAIiIAIiIAIiEANEJitBq5RlygCIiACIiACIiACnoCEjx4EERABERABERCBmiEg4VMzt1oXKgIiIAIiIAIiIOGjZ0AEREAEREAERKBmCEj41Myt1oWKgAiIgAiIgAhI+OgZEAEREAEREAERqBkCEj41c6t1oSIgAiIgAiIgAhI+egZEQAREQAREQARqhoCET83cal2oCIiACIiACIiAhI+eAREQAREQAREQgZohIOFTM7daFyoCIiACIiACItCmlhF06NDB9enTx3Xu3NnNM888JaF4+OGH3UMPPVRSHiUWAREQAREQARFoXgI1K3wQPQMGDHBt27Yt6w5suummPp/ET1n4lEkEREAEREAEmoVAzbq6aOkpV/TYnUL89O7d2za1FAEREAEREAERyDiBmhU+uLfSMImfNCiqDBEQAREQARFoGgI16+oqNaan0O1A/Jjrq1C6QsemTZvmJk6c6MaMGeOmTp1aKKmOiYAIiIAIiIAIlEmgZlt8yuTVaNkQYt26dfNxR8QfyURABERABERABNInIOGTPtOKSiTuiPgjmQiIgAiIgAiIQPoEatbVlT7K9EqsJP6oZ8+erl+/fr4yjz7yiBs5alR6FVNJIiACIiACIlDlBCR8MngDK4k/WmKJJdwCCyzgr6rLcstl8OpUJREQAREQARFoPgJydTUfe51ZBERABERABESgiQmoxaeJgTfW6ZZeemk322yzuY4dO0anaN++vVtmmWX89qeffup+++03v77kkku6Nm3auF9++cVNnjzZtWvXzm2wwQZu8cUXd4899ph7//33ozJYIe1qq63mVl55Zffdd9+5Dz74wL355pvu999/r5MuaWPOOed0a621lltqqaV8/d5++233xhtvuJ9++ikpeZ191GudddbxeadMmeJefvllN2nSpDppkjaIk6KuXbp08eekvpzz+++/T0qufSIgAiIgAjVEQMKnhdzsc845p96V8OIfMmSI33/VVVe5+++/36+fe+65fvnzzz+7xx9/3G2++eZR3k6dOkV5FllkEXfKKadErrMoUW5lxowZbsSIEe7uu+8Od9dZP/TQQ92GG27oxUd4YObMmV5gDRs2LK94Ovroo93aa6/tWrVqFWb14o1rffHFF+vsZwOBdsghh3gRhwiM24QJE9yFF14oARQHo20REAERqCECFQsfRi6udAwb4635r4xE0yznmmuuOqInPCtB0oiI1q1bh7ujdUTG3nvv7RAx99xzT7TfVvr37+8oI8kQMxtvvLHrutJK7pBcurgde+yxvpUovp/t2Wef3Q0ePNiLrttvv71OkjPPPDNq4apzYNYGrVbnDh3qDvrHP9wff/yRlET7REAEREAEWjiB1rlg2JMrucYPP/zQ/yo3l0q5ZTW16Mn6VBPwKMVef/1134qCmCHAGWNAxAsuuMCNHz/et5
DQSoPttttufmn/cHlxH1977TX31FNPeXfSGWec4UUGab7++msvbq688kpHK9Fiiy3mcGFhq666qrvtttu8API7cv923HFHt+2229qmL/Paa6919957r2+Vodca4gdXFq65559/PkqLmOrVq5ffRpzceeedjvPSwrPssstGk8l27drVtzaZu41Wq8022ywqhzJHjhzp880///yRCxA+Cy64oHvuueeitFoRAREQARGoHQIVt/iAyibqLLflp6lFT0u8vcSwYAgRM0aARswUsi+++MIPmhimIa7GhA0jStN6Yy0kw4cPd/fdd5+7/PLLfRZcSoiQ8Dx9+/aNigtdbOy85JJL3HvvvecOPPBAn4bYoksvvdSvI2xCwXTCCSe4d955xx/76KOP3AsvvOAuvvhit9BCC/mWqP3339/hLsPWXXddv+QfIu7ss8+OthF+++23n9tqq638vlVWWSU6phUREAEREIHaIlA/EKLM60f8lNpKwakkesoEnkI2xMwxxxxTr6RPPvnEt5bQYnL++edHoscSfvvtt+7zzz+3TbdSzmVltuKKK0ZxOQQTW1yRHWc5duxY9+OPP/pdtMBY9/0ddtghSvbqq69GoifamVu54ooros1QwORzyVni0aNHu7feesv/0TomEwEREAERqE0CqbT4GLpSW34keoxc8ywRPj/88EO9kyNqcF+ZEc9DTzB6Zs0777x+dxg83CaIA1pzzTUtm2+hiTZiK7jg6EWGmQgKBRTCh0DruIWCy+pCGlx0JoRwu5544onujjvucK+88oovAhFGC5JMBERABESgtgmkKnxAWaz4kejJ/oPXo0cPPwo0vbuKtVC80FU+nxGzE++ZNffcc0fJ99xzT8dfITN3HGkeyY1SzVQfCDSMec/4IwaIetDKNG7cuLy9yHwm/RMBERABEWjxBFJzdYWkGnJ7SfSEtLK5TlzPkUce6ULRQw8uAqQJhmY9ycIJVr/66qukJHn30bJUioVd3anXEUcc4cWNBXFTFi4wxNABBxzgiE9ab731SjmF0oqACIiACLQwAqW9aUq4+HwtPxI9JUBspqS4jKxnFVWghxSigUEQzXAbhYHUtp8BDhdddFG/SRByKcYAixarM2bMGEdgdSGzHl1hGoKu+aO1aqONNnLEHJlLjLIRR/GA6zC/1kVABERABFo2gUYTPmCLix+Jnup4mHbeeeeoooiesIdUdCDPCuLI3F0Ww5Mnab3diCZrYaI31xNPPFEvTbE7nn76accfRvf+k086yXWYbz6/Tc+xpKBrf1D/REAEREAEWjSBRhU+kDPxE19v0VSr/OIY58aMHl5Jxtg4ScbYQWbdu3e31XpLemfNN0uI0C2d1p3PPvssEj477bRTXuGDS2yOOeaIgqKJDbrmmmv8OQiUprzQEGP/vegid/zxx/vdSUHTYXqti4AIiIAItFwCjRLjE8eF+AkFUPy4ttMjQPyNWbkveFpezDbdZBNbjZaHH354FETMztlzIsSMVj2bEwxBkhSgvEmuTBM9nMtcWrfeeqsV48tnIMS44bZi7B7cVauvvro/zLxfv/76q3eT0TU+aTyp6dOnR0Xli0+KEmhFBERABESgxRJo9BafFksuoxf28ccfRzUj1uawww7zA/q99NJLdWJ0okQJK6QlNgbDPXT11Vf7AQppaWG/jbtjWS2Ghm0Cixn/hxGYMcQLrq8nn3zSj/pM7A2Tlpo9+OCDturH7WGQQjuOaCI9E6cSz0MvLaadsN5c/fr18xOXUgATmFrg8sEHH+y2335798wzz3hRRR7r6k5aBkOUiYAIiIAI1CYBCZ8Wdt8RLYgPRAq9ngjw5Y/pIUaNGlXU1TKGD1N6WHAyQsdEhRVAq4n1qmJm+NCYuJTWGAt+RiyZkArT0Qp40003hbv8gInEFDEtBsZozvzFDfHCnF5mTD5KfZmYlXqRP6nFCDY2Savl1VIEREAERKB2CDSJq6t2cDb/lfJivygXz5LU46mU2g0aNMj35oq7hSiX8XCYOsLMenHZNsvTTjvN965ibq+4MZUGriqbqiI8TvqBAwe6m2++OXKZhceJ4WGwQuoXXiPXjRB64IEHHK6vuHEdDGY4YMCAOqNOx9NpWwREQAREoGUTaJVzJSQPyNKyr9sxCWeW7bjjjquoesxizhxa7du39wP40UJi822VUjBdwJdffnk/sSeBy2GX9mLLwTWFmwrxMWHCBN8iVWxe4oRwU1F3hIvFDzWUnxYvWpyYEmPSpEmOIO1yrr+h8+i4CIiACIhAdRGQq6u67lfRtUUgIDIqNVpVmOOqEiPgOpyBvZSyaL0pZyZ1WoDiI0OXcl6lFQEREAERaJkEatbVZT2Jsnhbs1y3LPJSnURABERABESgWAI1K3zC8WaKhdVU6bJct6ZioPOIgAiIgAiIQGMQqFnhw5QINit4Y4Att0zqRN1kIiACIiACIiAC6RNonRvO/+T0i81+icSdECzLGDRt27aNxoZprprj3mKahhEjRjh6PclEQAREQAREQATSJ1CzvbrSR6kSRUAEREAEREAEsk6gZl1dWb8xqp8IiIAIiIAIiED6BCR80meqEkVABERABERABDJKQMInozdG1RIBERABERABEUifgIRP+kxVogiIgAiIgAiIQEYJSPhk9MaoWiIgAiIgAiIgAukTkPBJn6lKFAEREAEREAERyCgBCZ+M3hhVSwREQAREQAREIH0CEj7pM1WJIiACIiACIiACGSUg4ZPRG6NqiYAIiIAIiIAIpE9Awid9pipRBERABERABEQgowQkfDJ6Y1QtERABERABERCB9AlI+KTPVCWKgAiIgAiIgAhklICET0ZvjKolAiIgAiIgAiKQPgEJn/SZqkQREAEREAEREIGMEpDwyeiNUbVEQAREQAREQATSJyDhkz5TlSgCIiACIiACIpBRAhI+Gb0xqpYIiIAIiIAIiED6BCR80meqEkVABERABERABDJKQMInozdG1RIBERABERABEUifgIRP+kxVogiIgAiIgAiIQEYJSPhk9MaoWiIgAiIgAiIgAukTkPBJn6lKFAEREAEREAERyCgBCZ+M3hhVSwREQAREQAREIH0CEj7pM1WJIiACIiACIiACGSUg4ZPRG6NqiYAIiIAIiIAIpE9Awid9pipRBERABERABEQgowQkfDJ6Y1QtERABERABERCB9AlI+KTPVCWKgAiIgAiIgAhklICET0ZvjKolAiIgAiIgAiKQPgEJn/SZqkQREAEREAEREIGMEpDwyeiNUbVEQAREQAREQATSJ9Am/SJVYloEOnTo4Pr06eM6d+7s5plnnpKKffjhh91DDz1UUh4lFgEREAEREIGWTkDCJ6N3GNEzYMAA17Zt27JquOmmm/p8Ej9l4VMmERABERCBFkpArq6M3lhaesoVPXZJiJ/evXvbppYiIAIiIAIiUPMEJHwy+gjg3krDJH7SoKgyREAEREAEWgoBuboyeidLjekpdBmIH3N9FUpX6Ni0adPcxIkT3ZgxY9zUqVMLJdUxERABERABEcgsAQmfzN6abFUMI
datWzfXpUsXd9FFF5UtftZaay3Xvn37ehdnwuqbb76pdyzrO+aaay7Xs2dPt/TSS7uOHTu6KVOmuE8++cSNHz/e/fTTT2VVf8MNN3StW7d23333nZswYUJZZSiTCIiACIhAfQISPvWZaE8BAsQdEX80atSoAqnyHxo0aJBr0yb/Y/fHH3+49957zw0fPty98cYb+QvKwJE555zTHX744W7NNdd0s81W32u83377uVdffdUNHTq0JAG03nrruYEDB/ornDlzptt3333djz/+mIErVhVEQAREoPoJ1P+2rv5r0hU0MoG04o+SqomAWGGFFdwpp5wSvfyT0jX3vvnnn99dcsklbu21104UPdSPa1lttdXcsGHDXKdOnYqu8g477BClbdWqldt5552jba2IgAiIgAhURiD/T+/KylXuFkwgrfij0aNHu6+//trxcl9yySXdcsst5xBVs88+u6eHu2ehhRZyxx13XOZoDhkyxM0777xRvT788EP37LPPunfffddfBy695Zdf3h9v166dI/0BBxzgaNEqZHPPPbdbZpll6iTp1auXu+GGG+rs04YIiIAIiEB5BCR8yuOmXCkQeOKJJ9ynn35apyTcYAidVVZZxe+n9Wfbbbd1d999d510zbnRd4893HzzzRdV4bacgBsZuP6IyUHUbbfddm6vvfbywo64JlxWV199dZQvaWWnnXby6cNjCCxiq95///1wt9ZFQAREQATKICBXVxnQlKXxCMyYMcO7uZ577rnoJP369cvrTiIRLiVcTrvssosjLe6lQnFEUcGzVojVWX/99d0eOUGz5557OlpraHnJZ1tvs0106IUXXqgjeqIDuZW77rrLIe7MNt98c1vNu9xkk02iY88//3y03rdv32hdKyIgAiIgAuUTUItP+eyUsxEJ0HPsmmuu8aKG3k30mnr00UfrnXHXXXd1tJKEQsdiZAiSPvXUUwsGFh966KEOl1o8OJmg4scee8zH5/z+++/ReWmBQihhpLniiiuiY0kr1157rS8fdx51pCXrtddeS0rqll12WceI3dj06dN97zkYkJd81LEhV1liwdopAiIgAiIQEWjxwoeRiysdw8Zoaf4rI9H4S3ox0Zqyzjrr+JPRWhIXPrjAdtttt7yVIWYIYXLSSScluon69+/vBVVSAYiNjTfe2HVdaSV3SC6dWTgS9meffea7rtuxpOX333/vu7YvtdRS/jBl5hM+tDiZPfXUU+6HH35wkydNcosvsYTv2r7FFlu4+++/35JoKQIiIAIiUAaBFu/qYq4qBEulJtFTKcHS8xMobLbgggvaql/S5XvvvfeO9r3zzju+2/ixxx7rbr/9dmetNLTOsC9uO+64oyNo2Ayhcdppp7mjjz7aPfLII741h2OdcsHVBx98sCWr0zvryy+/jPYXWvnqq6+iw/HrsAO05qy66qq26WOE2LgvN2Ck2VZbbWWrWoqACIiACJRJoMULH7hUKn4kesp8uirMRk8ps3DeMkSCjXPD8ccff9wHRD/99NO+ZWfEiBHuqKOOisQPgci0DoUWxsxcddVV7rzzznOvvPKK45x0U7/yyiuj5BtssEG0bq4odkzKtcYUY1988UWULAyKjnbmVjbbbDPfqsM+BJUN5Pjggw9G17Hooou6BRZYIMymdREQAREQgRIJ1ITwgUm54keip8QnKsXkH3/8cVSadXFnB+4m4n4wgqEvvvhivx7+Y+TksKUvFD4rrrhi1HMKV1SS+2js2LHRoIGMzGxd+OfOrZuFLTm2L2kZtgxZfFA83dZbbx3t4lk1I6bnzTfftE23++67R+taEQEREAERKJ1AzQgf0JQqfiR6Sn+g0syB4DAjkNjM4n7Yfvnll6MWETtuy+uuu85WI+HCDkZaNiOOKJ9dcMEF7vrrr/d/NnLyn7Vw0XhD+fLbfuKFIguuw/YxGOJiiy3mN7nOeNf9O+64w5L63mfRhlZEQAREQARKJtDig5vjROzXdEMBzxI9cXJNvx0O5Pfbb79FFUAomNGyk89++eUX3yJEbyr+aG1h30q5gGWzyZMn22q95Ysvvuj4Cy2cewvXUzEWpvsxYe6usBUHV1t4rZTPuEA///yzQwjyh3CL16uYeiiNCIiACIiAczUnfLjpDYkfiZ5sfDTo3m1G926zsCWooZniERHW1R0BMjE3w3wYp1Osu8rOzaShTEaKFTsNxSKLLGLZHa61uDGGkBkjWBNjFLc55pgj2sUUFhI+EQ6tiIAIiEBJBGrK1RWSyef2kugJKTXv+sorrxxVgK7jZhb4y3bSTO+WjmUoGCwf4sWMKTFKsTCgefHFFy8qayh8wusgM4MthoMlEsuEoIr/heMM0U0/X6xQURVSIhEQARGoYQI1K3y453HxI9GTnU8C81wxTYPZnXfeaat+XBzb6Ny5s63WWzLVgwVBEztjrS3hNBnFihcrfEzQvZweVgRKF7IlcmPwhOKKoOnQGG3ajC74uLTy/dnghYggpsOQiYAIiIAIlE6gJl1dISZze7EvXA/TaL1pCdDqMWjQoOikxNXQ1dyMMXu23HJLv7nGGmv4ION4XAwHmRTUbMqUKbbq3V220b17d1utt2TwQ+t+vv/++7tp06a5zz//3Asom6D0wAMPdEceeWS9vLbjoIMOslU/gvRHH30UbXOdjARtxhxlhebjYjoOG5WaAR1vueUWy6qlCIiACIhAkQRqusXHGCF4JHqMRvMuGcSPiTzD8WqGDh1ap1JMJcGoxhgtOoMHD65znI2uXbu6Hj16RPvHjRsXrdOyZ0IJNxPzc8WNObNM9OAaQ/SYMe6PGSMyn3HGGfWmvOA4gyGGgdQ33nijZfNLWm3MhcX1FBI9ZKB3l/VuI8Cb1iSZCIiACIhAaQRqvsWnNFxKnSYBWmToJo54scH5wsBlznXPPff4Xk3x8w4bNixqFerWrZsfywfxijsL0cP8W9aN/PXXX3e33XZbVARj/4wcOTIa+ZlRnBEoTz75pHczIZiYqNSMQQRDY5TnPn36RG4uWm3oOo9woZfYwgsv7N107dq1i7IxJlHczcUUFGaU2ZAhjogxMsGDYBsyZEhD2XRcBERABEQgICDhE8DQatMSYOLNfEa8CxOVhrObh2mfeeYZ98ADD0QuL+JowtGYLS1ChIlK48ZYOauvvno0TQSxOknxOoipm266KZ7dz/91/PHHO0QXhmAjGDsMyLZMU3MtRowkHRqxRWGr1ujRo8PDedeJMcK9huHmk4mACIiACJRGQK6u0ngpdYUEzFUTL4b9jLHD9A60xhDPkk/0WF6mlTj99NPruKHsGIHADG6Iu8mCgu2YLZmb6/LLL/etPLbPlnSTx6V16aWX2q46S8pEUF122WXR9BJ1EgQbHXJTZuyzzz7BHlcnOJku9WEMUp2EsY1wCgu66YeDOcaSalMEREAERCCBQKtcs344GG1CEu1qDgLEjWTZCMTNkuEuw11FXA7iiYEAcWkVa3QPp/UGAcaAgaXk5RzMJca4Qx07dnSIJkTJEUccUWd0Z+p0wgkneIFXbL2UTgREQAREIF0C
Ej7p8kytNAmf1FA2W0H0/DrrrLPqdGdnAMW426vZKqgTi4AIiEANEpCrK6M3PexFlLUqZrluWWJFoHX//v0ds8ZjjM9zzjnnZKmKqosIiIAI1BwBBTdn9JbTMmCBs1mrInWTFU+A7vhMS0Hvsoam2Ci+VKUUAREQAREoh4BafMqh1gR56L1jM4I3wemKPgV1CkcvLjpjjSeku7pET40/BLp8ERCBTBBonRsT5ORM1ESVqEOAHk6MVkycCIGzzT03E+4tRkweMWKEXuB17pQ2REAEREAEqomAgpur6W6priIgAiIgAiIgAhURkKurInzKLAIiIAIiIAIiUE0EJHyq6W6priIgAiIgAiIgAhURkPCpCJ8yi4AIiIAIiIAIVBMBCZ9quluqqwiIgAiIgAiIQEUEJHwqwqfMIiACIiACIiAC1URAwqea7pbqKgIiIAIiIAIiUBEBCZ+K8CmzCIiACIiACIhANRGQ8Kmmu6W6ioAIiIAIiIAIVERAwqcifMosAiIgAiIgAiJQTQQkfKrpbqmuIiACIiACIiACFRGQ8KkInzKLgAiIgAiIgAhUEwEJn2q6W6qrCIiACIiACIhARQQkfCrCp8wiIAIiIAIiIALVREDCp5ruluoqAiIgAiIgAiJQEQEJn4rwKbMIiIAIiIAIiEA1EZDwqaa7pbqKgAiIgAiIgAhUREDCpyJ8yiwCIiACIiACIlBNBCR8quluqa4iIAIiIAIiIAIVEZDwqQifMouACIiACIiACFQTAQmfarpbqqsIiIAIiIAIiEBFBCR8KsKnzCIgAiIgAiIgAtVEQMKnmu6W6ioCIiACIiACIlARAQmfivApswiIgAiIgAiIQDURkPCppruluoqACIiACIiACFREQMKnInzKLAIiIAIiIAIiUE0EJHyq6W6priIgAiIgAiIgAhURkPCpCJ8yi4AIiIAIiIAIVBOBNtVUWdU12wQ6dOjg+vTp4zp37uzmmWeeiir78MMPu4ceeqiiMpRZBERABERABOIEJHziRLRdFgFEz4ABA1zbtm3Lyh/PtOmmm/pdEj9xMtoWAREQARGohIBcXZXQU96IAC09aYkeKxTx07t3b9vUUgREQAREQAQqJiDhUzFCFQAB3FuNYRI/jUFVZYqACIhA7RKQ8Knde5/qlVca02OVIbYnbhI/cSLaFgEREAERKJeAhE+55JSvUQgQ0yPx0yhoVagIiIAIiECOgISPHoPMEZD4ydwtUYVEQAREoMUQkPBpMbeyZV2IxE/Lup+6GhEQARHICgEJn6zcCdWjHgGJn3pItEMEREAERKBCAhI+FQJU9sYlIPHTuHxVugiIQOMSWGKJJdzAgQPdX//618Y9kUovmoAGMCwalRI2FwHED0bvrtBs246Hx7QuAiIgAlkgcNppp7n27du7DTfc0E2dOtX973//y0K1aroOEj41ffur5+JN3JjYsZrbth23/Vpmh0DPnj1dv379fIUefeQRN3LUqOxUrpFqwkjmQ4YM8aV/+umnjpdf2nbQQQe5tdZayxd78cUXu1deeSXtU5RUXlNcc0kVykjiNm3+fM0usMACGalVbVdDrq7avv9VdfWIm3xd3avqQmqssjT184XPX5fllquJq+cXvl3zsssu2yjXvFyOpZ1jkUUWaZRzlFJoU1xzKfXJStprrrnGff755+6FF15wDzzwQFaqVdP1+FOK1jQGXXy1ELCWHWvpqZZ6q54iIAK1SYAfa0k/2GqTRjauWsInG/dBtZhF4IwzzsgMi06dOrl1113XzTvvvO7ll192b731lvvjjz/cQgst5Nq1a+d+++03hxsjn/ErfPXVV3csP/zwQ/fSSy+577//PjE55+IX88yZM93EiRN9Gs679tprO1pM2DdhwgQfI5BYQLCTuq2zzjpuqaWWclOmTPF1nzRpUpDiz1VG3F5wwQX9Dn6V/vTTT26FFVbw1/3DDz+422677c/Es9bIg4uFaUomT57s3n33XX998YRLL720m2222VzHjh2jQ1zjMsss47dhB8O4wRduiy66qPviiy/ciy++6L788st4smi7nGuIMhe5wnV07drV0cqCS+ejjz7yzwPMQrNng7qb4eqwa+Z+5HsGFl54YX/faMXhfr/55pvuq6++smL8cvbZZ/fPAxtzzz13dIy8nGPGjBnuk08+ifaHK6U8F2G+htZLuWZ7zikThq1atfLPOGzffvtt99RTT9U7XTFc6mWatQMmlE0dP/vsM/faa68V/MxaOdxvnvHlunRxM37/3b366qvunXfe8Z9/S1PskuefzzLGNfMdkmR8nlZcccXouedzxZ8sfQKtevToMTP9YlVirRFobsFy3HHHpYYcocL12JeVFYwoGTFihNtqq63c/PPP73fvuuuudjha8mI8+eSToxdUdCC38vXXX7sTTzyx3gvt6quvdjbtB7EbnJ+XRGic/5FcjMywYcPC3XXWjz76aP8i4YUSGgLjnHPO8SIi3P+vf/3LrbHGGn7XmDFj3CabbOLmmmuuKEl4fbnvCte/f/86xy3hd9995/773/9GcSZMWHvdddfZ4cTlVVdd5e6///7oGC982Cy++OLRPlvhhc51P/bYY7YrWpZyDVGmEla22247t8ceezhER9wQfhdccIH74IMP/KHrr7++jiCJpycOJx7vs88++zgm+W3dunU8uXv//ffd0KFDo+dlm222caQvZH379vUCKExT6nMR5m1ovZRrDp/zK664wh1wwAFeHHOOqbln6O8HHhidrhQuUaZZK8svv7w76qijos9peJwA43PPPde98cYb4e5ofccdd3S77babC2NzOMjnb/To0e6mm26K0hazQvwVwgv797//7V1eYb7VVlvNf67sOyU89uOPPzqYjR8/Ptyt9QoJKManQoDK3rII8Ivr/PPPryd6uErEBEG6SV9QRoEvy6E5gUErTZLRuoJA4Is5n11yySX1RA9pOT8uvp122ikx67HHHutbDOKih8S8tAcPHuz4Us9nvHxD0ROmQ4wdeeSReY/PN9987vjjj3dLLrmkz5ZUh7C8+DrnRZgliR7SwvXQQw91RxxxRDxrne1C11AnYZEbe+65p/vb3/6WKHooYrHFFnNnnnmmXxZTZMiFe/Kf//zHIWaSRA/ldcm1OJDGjof5850vnqbS5yLfeYrdH6+P5TswJ3JoWYlbOVzCMvjBcOqpp+b9nPLD5JRTTvGtmmE+1rfYYgvHPY+LHo5xHbvssosX52yXY3EWtOjyoy3fdwo/IAYMGOA222yzck6nPHkIyNWVB4x21yaBw3IvV3vJ8AuPX1rPPvusf6FvtNFGeQWN0Tr77LNdh5wIwH7++WfHr2FcFriP9ttvPy8cKJ+X+GGHHWbZ6iz50v3ll1/8L0POjbuNL0hrcUD4xF1Qe++9d9TDh6b0u+66yz3xxBPejcUxXtDY7rvv7u677z5ffp2TztrgmnHH0MROHTDybr755rNSON/kT8sT7rutt97arbfeev5FwZc6L1lahXCTnXTSSf7FtuWWWzpaizBcONYS9N577/l9vPx4uVu
L1+8518Ktt97qr59zI9Zwm2Gca4MNNnBPPvmk3076l3QNSeka2kd9QqH48ccfu3HjxnmXCWOy0FLGveSPlxfXTYsV7jxaDf/xj3/4U/Ac8FxgoWtshx12cJ1z7g2Me0bw69ixYz1LWhVXWWUV/7LFRYXwpMXrwQcfjNyKh/zzn67TrJYE8tFNGnah+zCt58JXMs+/Uq45XgT1/Tzngvog9yyZ27gcLlYuAppnyYQLLSbDhw93r7/+un+G9s99Bu3zefjhh3uu06ZN89lpefn73/9uRbnnn3/e3Xnnnf7eUCfcx1i3bt38vcFtVqnxzJgYoh7jcvfxpZxbne8a/sydiUjkMwcvWeUEJHwqZ6gSWgiBNddcM3qRcEm8rHgZYc8995wXG7iwVl55Zb8v/o9fisTVYIgGvkRNPPCl/vTTT/tma16UxIDwRUvcTtxw6yCKvvnmG3+Ilzy/Ymky50tyzjnn9AKKFypGr6Ftt93Wr/PvhBNO8OKEdWIKuAZrbufc+++/f153GW6VZ555hqyRhe4u6nv66adHxy688EKHgEHUYRYvxLq5ElZddVU2veFmiL8wcHcgFDCunRcSsT0Y4orrR1BZ120EBS/5fLESSdfgCyvxn4k1siEmcJ3YOeFA66C5rcwtibDDvv32W7/kH9cUv2b2hwH6V155pRdV7Md46SIoeMlixKlgxGBZWdNz4tKcodxn2+8T5v6l+VxYmUnLUq45zA8XXvz2nNuxcrhYXp4lhCKGkPhnThzaZ5A4Nz6DsEbUIo569+7t7rjjDp+eZ8xECPsQTGYMTUBZVreDDz7Yt8TY8XKWuHYRyWa0QnEfMeIJb7zxRv99QT35ccCzQKyhrHIC9dsZKy9TJYhAVRLgV7bZpJxQMdFj+1gifOzlF+5nvVevXtEuvrTsC9d28uvz8ccft03fchFtBCvEH8RfBgS52i9TkhJka8avUTMLwrRtWxJPYUZLQpIhJuKih3T80hw5cqT/46URN+J07JcoX9AmAuLp8m2vv/760SG6+5roiXbmVhAzdg5+BXfv3j08HK3nu4YoQQkrc8wxR8HUvJxokWNJYK61yBXMFBy8+eabI67WWzE47Fu9bDufK8SOJy3Tei6Syk5jH6I5/pxTbiVcaFk1u+GGG+p9Bvns0ppoxo8PjGcw3kpkaWx5+eWXR5/9Up9xKyNcIvwKGSL39ttv988Xz1j8+6RQXh0rTKBN4cM6KgK1QyD8Mru/wHgbuFKSLHw5IUDC8ix92OMm7Pljx1mG7pBw//Tp06PYozDweqWVVoqS5TtvWGaYN8qYW4n3ILJjBOSGg+MRd4DriXgeWp+wkEkpAoAWqPBXL4IxyWhx4dro7YXxguPXe9zyXUM8XTHbCL59993XJ+WaeFHz0sSFaC8t3CrlGuWHRgsALYbEOSEgYWMWrtu+hpZpPRcNnafc4+baiuevhIsFESNw4uXYedhvPGlRxHChmtHqkvTZ5Tg/XnheuT98jvL10LOyCi3Jy2fann9aD3FR47a0chGB/MnSJSDhky5PlVbFBAh6NLMmZ9tuaBl22SYtAdINWTxPQ+nzHbc4AI7jbuOvkJlYKZQmfowveuJd6OGE8EnLQvGHmDBBkVQ+MTYmfMylmJQurX285IjpsfgmulXjmjnkkEO8KwsBhEskbIkr9dy4XOi9xHQG9jIutYx86Zviuch37kr3l8OFHx7WavPrr7/mrQItKXfffXed4+FnEcFIB4OGDFdipa4nejYS78fni/tFDB5/xMhRNkI7n0BsqH46np+AhE9+NjpSYwTCF0+pLQfhF2ex2BpypRRbjn3ZF5ve4hiKTQ8XhJzF4Vg+flXjfmJZjpiiHHqDmYVBubYvXIZxM2HrWpgm7XXcG8Qw0bPLfpnDj9YZRCB/BIszOm+pBk/cmvEWMpiaACyXK3Vp7Oei1OstNn25XMLn0+Lfij2nxQUVm5504bNbSr4wLeKZ2KNBgwZFXd45Tn0I4ueP54+4PXsmwvxaL4+AhE953JSrBRL4JRcsbN25ebEx5k6xFqaNxxHkKyMpliVf2kL7EQwm2hiLp6EWCIuVKVRmeCwMPuba7r33Xt/SYc3xpL322mujoNIwb0PrYctaQy/q8MVGa0xTmY28S9dyujsTI4UrxAQksWG0BjFGSylGTzATPbRCMEYULUx2f7inoyqY16yxn4tSrrWUtOVyCQfptM9xsefF5WTGmEwElzdkofu3obSFjuNuoyURMc9wDPQWxI1sn2ni+S699NI6Pc4KladjDROQ8GmYkVLUCIFvcwOoWVdXmrEZqbVYI0gTUUCTNX8MdMZ2UxiDB5oooM78ikzTcMOY0aX60Ucftc2Kl4g0XvR8ySMCaOHIF8RpYwRx0lBoVlyJIgtgMEEbPBJ33zHHHBP1tuJlxX0v9p4TH2L3jPgoxmoJhWSRVSqYrLGfi4InL/NgJVzgZ5/BUlvKEOAWGM1wDrfcckuZV1B+Nlo0Eb/88SzRm5JxgzDc8PSOTEtslV/LlpFTvbpaxn3UVaRAgCHtzfjllWT8kuRLKcnwy5vhGslnxC/kKyNfnkL7w3rnG9yQ/LSolBOfE75EksQgx8M0heqadGz6rHFUOJZvVGLqTfdxs7TFnZUbLonzoMWFPxMpdpwWJwYuNKHD/bSu55am0DIc4BLhlyR6Ko1jauznotD1lXusUi7WEkhrHGNMJRnuI4QNfwwZgNmYUqwjYgs9z/k6B5C3FGMwTnu+4p9bnitGiA57vTGujywdAsnf4OmUrVJEoKoIhEPRxwft40J4udGLx1wc8YsLe5Hg/ghf1JaWfcSN0HQdjnljx8tZht1zaRUJB92z8viyprWCYEoLELZjDS3DQNG99tqrTnLE1HnnnVcnniQeuxS24CT1lrk11zpmxjgp8VGt4Y77w8Qi8RtJQw1YGWktcUXSEsUfYx8lmQkfjoWcwhiTMMjYygjdMjDs2bOnHfJLenYhrApZGPNhAzyG6Rv7uQjPxXpD1xxPn7RdKRcGeDQjSDguUriXobi2YQQYZsJizLgf4fNm5bEk1oYhHSg7bgxGmXSv4+lsmyEQ7Pni+4L1uP0UuHStfpaGeobDWth+Wk5xy8aNz15a3znxsqttW66uartjqm+jEeBLl19+9mXCaLmM78EXFF8aCIb4F2lYGcYN6dWrl0/DS5ph8xkXh27Y/IJkgEQGouOY+fPJU6nRCoMQsAH+6NXF4HvMa0VrAi0RjFdiv2KZdqOU3ij0pjI3AKPXMhgiI+ES14JIsTgVuw7ioybOGsiPfeQ3oxcXgzMS18CkrfRYYRyg7bff3n8p8+XPAInUnRGv4cRw/faFjdBgoLemMLoV25hHtAIwFhJ1ZmBGngX28fLBeClRXzPcJbiwEMlcE12V6X6PmCJ+hIEcEQoWi0KMBy0UcOP5o+XDhB5lJr0UmbjVRCJzrFEerY70WKI+jf1c2LXasqFrtnSFlpVyYbwpRgpHgPDHDwzGhsJNyWeY58wCmWFkI4CzjqBhkEKMnl2XX3aZG5uLuYIz23x+LaCZchCWFo+FSO
VecM/5ccSApw0ZIg0RxjOCK4s4OT5XfGcwuSrPV9jSSPyeGc8HY1vxjOD2tXqTj1gz9lNvniuMOD1GG8d4PhhRvpZNwqeW776uvR4BetnwhWK/3Hjx2cuvXuKEHXwBMlUD+flCQ4CEIwBbFr7E0hA9Vh69rhhp2qamIEaJv7gRy8AItaUYc2jh9rGXPGOl2HgpVo695NmmVYsZ1c0QC7ROkB8mNhw/LyAL3mVSVsSBjZHSKycg+QuNc3B/QrdEeDztdQZDJJDbXCa89BAY/MWNF2zcENLmuoEJf2Hg7EUXXeR785CPF1X8noVM4UZZYddmBCNuGwzxSQ8zjBnObdymxnwu/Mli/xq65ljyxM1KuCCMGWWZFhueN7gwF1rcCCZnctvQCGInjsaYEu8XjlpuacnLvHUmethvnzXuE62WxQgfWkJ5nhmpHGGLCObHi/2AsfOx5IdAOAYY3ykmjPlRgPuceDnGI7L9fEb5scN51sqJNjPS1Lrwmc1gaCkCIvD/g/gxJQKtFLx4QuMLhBdJ6N4Ij7POL0vcIrTAhF+MHKM8Ak4RPMy4HFpYZrgeppkZBEvzCzU0fu0PHDjQD3YWP0Y6Yh94IdJtNqxXuD4jVqaVT9Al0zUwmnXcYEILUNgLJi4UET28zMJzxcvhhc4UH0ncSAs3ZkFPGlk6LDfpGvilywus2L8w3oJf4ZyXloj480C9eNETJ5I0azyCMYz7In1oXEu+NDBnRnVmLDdjzrbQGM2XVqlCVupzgbgqlpOlQ9CZ5bseOx4+2+G6HWdZKRem7uBZQmTGz8GzyDx0tJCEItLOz+f7rLPOSuwZybPO55tA9PhwFybGeUYQUMUarYCIKIRq+Bxbfnqb0dLIxMahIcot/Ve5lh3rycln3K6ZGC/qjD0X9FIjTa1bq5xyrPvtXutEdP1lEeAXe3Mav/DSNn454SbilxhuDAJQaZK2aRv4Eu3bt2/B0zK+D24iXpy8qOxLqWCmFA7S4oQA4Xz0BEkSQ+WchiBjmv2J48Hlw3UVa/z6xtVHq87kyZP9vET5eMDtL3/5i683LzDcKOUa7g9rrSq2DF7qSYY7A3cdbgTcdfZiSUrLPmvJoSUOlwQvzqQ8XC9l80zhGk1Kk+8cuF+5J7QaUH6hYRIaei4IyreWo3zni+9HrIZd+Yu95ng5SduVcLHyEHOdc/E33C+EarEGT7jCFxdUQ886bime0/hnzebJ47y0yoY/EuJ1MfcxgofnPino3fLAmecqLuCoN89ofD9uY8RSoTKt7Ja+lKurpd9hXV9JBGgyZiZkfmHxBRafRHTnnXeOyivmZUwafp01tdEcX0xze6n1ouWIF105lsQzXzlwy+IvU1oL+CvWEHa0BliLQL58XG8xz1NSfl5kSS1hSWkbei7yCdGksmxfvCWs2Gu2/IWWlXCxchEAcRFgxwotEQnxiV8LpTf3YjxNGAPXEF9EayHhGpZNWUnXRb2T9ocDgIbl1OK6hE8t3nVdcyIBfkHRVE9wLq0lgwcPrvMlhO/eAgQpoDkETWLFtbMgAVyXpbT4mAuhYKEt9CAxV2EQbTGXaW6WYtLWWhqeOwuI5tpLaXGqNVZNeb0SPk1JW+fKNAHcNzQTY7T8EJdCjARjadBMbAHPHOfX0/Dhw1mVZZyAmvaLv0GIvnDsmOJzKmVIgDix7t27+0lnCXjGaKEptjUnLEvr6RNQcHP6TFVilRJA5NA6QCyOGfE9+NFD0UN8SmPEFNk5tRQBEahuAnRFZyyd0M1lXeer+8paRu3V4tMy7qOuIiUC/OJlkDLG7KAXDQG2BFgS20IvCb68mE9JJgIiIAL5CBDvQ7A67nN+UI0fP94PHpovvfY3LQH16mpa3i32bIwNg3uoOYwYg7BXSXPUQecUAREQARGoDgJydVXHfcp8LcORepu6ss157qa+Vp1PBERABESgMgISPpXxU+5ZBOgJYhMENiUUzllqL5SmrJ/OJQIiIAIikC0CrXODO52crSqpNtVIgAHXGCiPwb4Y5M7mhWqsa8G9xVxEI0aMaHBgscaqg8oVAREQARGoPgKK8am+e6Yai4AIiIAIiIAIlElArq4ywSmbCIiACIiACIhA9RGQ8Km+e6Yai4AIiIAIiIAIlElAwqdMcMomAiIgAiIgAiJQfQQkfKrvnqnGIiACIiACIiACZRKQ8CkTnLKJgAiIgAiIgAhUHwEJn+q7Z6qxCIiACIiACIhAmQQkfMoEp2wiIAIiIAIiIALVR0DCp/rumWosAiIgAiIgAiJQJgEJnzLBKZsIiIAIiIAIiED1EZDwqb57phqLgAiIgAiIgAiUSUDCp0xwyiYCIiACIiACIlB9BCR8qu+eqcYiIAIiIAIiIAJlEpDwKROcsomACIiACIiACFQfAQmf6rtnqrEIiIAIiIAIiECZBCR8ygSnbCIgAiIgAiIgAtVHQMKn+u6ZaiwCIiACIiACIlAmAQmfMsEpmwiIgAiIgAiIQPURkPCpvnumGouACIiACIiACJRJQMKnTHDKJgIiIAIiIAIiUH0EJHyq756pxiIgAiIgAiIgAmUSkPApE5yyiYAIiIAIiIAIVB+B/wO9N/2l2KPKEwAAAABJRU5ErkJggg==)" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "dPd9i6_t7ERJ" + }, + "outputs": [], "source": [ "\"\"\"\n", " {\n", @@ -173,12 +175,21 @@ " ]\n", "}\n", "\"\"\"" - ], - "metadata": { - "id": "dPd9i6_t7ERJ" - }, - "execution_count": null, - "outputs": [] + ] } - ] + ], + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "name": "python3" + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 } diff --git a/poetry.lock b/poetry.lock index edc2b949f..6116b1618 100644 --- a/poetry.lock +++ b/poetry.lock @@ -44,7 +44,7 @@ testing = ["bitsandbytes", "datasets", "diffusers", "evaluate", "parameterized", [[package]] name = "adalflow" -version = "0.2.5" +version = "0.2.6" description = "The Library to Build and Auto-optimize LLM Applications" optional = false python-versions = ">=3.9, <4.0" @@ -67,6 
+67,8 @@ tqdm = "^4.66.4" [package.extras] anthropic = ["anthropic (>=0.31.1,<0.32.0)"] +azure = ["azure-core (>=1.24.0,<2.0.0)", "azure-identity (>=1.12.0,<2.0.0)"] +bedrock = ["boto3 (>=1.35.19,<2.0.0)"] cohere = ["cohere (>=5.5.8,<6.0.0)"] datasets = [] faiss-cpu = ["faiss-cpu (>=1.8.0,<2.0.0)"] @@ -503,10 +505,12 @@ files = [ [package.dependencies] click = ">=8.0.0" +ipython = {version = ">=7.8.0", optional = true, markers = "extra == \"jupyter\""} mypy-extensions = ">=0.4.3" packaging = ">=22.0" pathspec = ">=0.9.0" platformdirs = ">=2" +tokenize-rt = {version = ">=3.2.0", optional = true, markers = "extra == \"jupyter\""} [package.extras] colorama = ["colorama (>=0.4.3)"] @@ -2250,22 +2254,6 @@ files = [ [package.dependencies] jsonpointer = ">=1.9" -[[package]] -name = "jsonpickle" -version = "3.2.2" -description = "Python library for serializing arbitrary object graphs into JSON" -optional = false -python-versions = ">=3.7" -files = [ - {file = "jsonpickle-3.2.2-py3-none-any.whl", hash = "sha256:87cd82d237fd72c5a34970e7222dddc0accc13fddf49af84111887ed9a9445aa"}, - {file = "jsonpickle-3.2.2.tar.gz", hash = "sha256:d425fd2b8afe9f5d7d57205153403fbf897782204437882a477e8eed60930f8c"}, -] - -[package.extras] -docs = ["furo", "rst.linker (>=1.9)", "sphinx"] -packaging = ["build", "twine"] -testing = ["bson", "ecdsa", "feedparser", "gmpy2", "numpy", "pandas", "pymongo", "pytest (>=3.5,!=3.7.3)", "pytest-benchmark", "pytest-benchmark[histogram]", "pytest-checkdocs (>=1.2.3)", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-ruff (>=0.2.1)", "scikit-learn", "scipy", "scipy (>=1.9.3)", "simplejson", "sqlalchemy", "ujson"] - [[package]] name = "jsonpointer" version = "3.0.0" @@ -4456,22 +4444,6 @@ files = [ {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, ] -[[package]] -name = "pyvis" -version = "0.3.2" -description = "A Python network graph visualization library" -optional = false -python-versions = ">3.6" -files = [ - {file = "pyvis-0.3.2-py3-none-any.whl", hash = "sha256:5720c4ca8161dc5d9ab352015723abb7a8bb8fb443edeb07f7a322db34a97555"}, -] - -[package.dependencies] -ipython = ">=5.3.0" -jinja2 = ">=2.9.6" -jsonpickle = ">=1.4.1" -networkx = ">=1.11" - [[package]] name = "pywin32" version = "306" @@ -5600,6 +5572,17 @@ webencodings = ">=0.4" doc = ["sphinx", "sphinx_rtd_theme"] test = ["pytest", "ruff"] +[[package]] +name = "tokenize-rt" +version = "6.1.0" +description = "A wrapper around the stdlib `tokenize` which roundtrips." 
+optional = false +python-versions = ">=3.9" +files = [ + {file = "tokenize_rt-6.1.0-py2.py3-none-any.whl", hash = "sha256:d706141cdec4aa5f358945abe36b911b8cbdc844545da99e811250c0cee9b6fc"}, + {file = "tokenize_rt-6.1.0.tar.gz", hash = "sha256:e8ee836616c0877ab7c7b54776d2fefcc3bde714449a206762425ae114b53c86"}, +] + [[package]] name = "tokenizers" version = "0.19.1" @@ -6462,4 +6445,4 @@ multidict = ">=4.0" [metadata] lock-version = "2.0" python-versions = ">=3.11, <4.0" -content-hash = "df5b3eaad85fc2f943506d095b2e3f7094982d55d461f40a7be13d9bb742fc6f" +content-hash = "1fdfe039ade0d41d28cab1d52f8f9fddcb23599178d433e31702386b2b1c5b2e" diff --git a/pyproject.toml b/pyproject.toml index c064d819a..e174e6161 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,6 +18,7 @@ packages = [ python = ">=3.11, <4.0" adalflow = { path = "adalflow", develop = true } openai = "^1.34.0" +black = { extras = ["jupyter"], version = "^24.10.0" } [tool.poetry.group.dev.dependencies] @@ -33,7 +34,6 @@ pgvector = "^0.2.5" cohere = "^5.5.8" pydot = "^2.0.0" matplotlib = "^3.9.0" -pyvis = "^0.3.2" ollama = "^0.2.1" torch = ">=2.0, <3.0" textgrad = "^0.1.4" diff --git a/tutorials/adalflow_embedder.py b/tutorials/adalflow_embedder.py new file mode 100644 index 000000000..50bb663d9 --- /dev/null +++ b/tutorials/adalflow_embedder.py @@ -0,0 +1,138 @@ +import os +from getpass import getpass +from adalflow.core.embedder import Embedder, BatchEmbedder +from adalflow.components.model_client import OpenAIClient, TransformersClient +from adalflow.core.types import Embedding, EmbedderOutput +from adalflow.core.functional import normalize_vector +from typing import List +from adalflow.core.component import Component +from copy import deepcopy + + +class DecreaseEmbeddingDim(Component): + def __init__(self, old_dim: int, new_dim: int, normalize: bool = True): + super().__init__() + self.old_dim = old_dim + self.new_dim = new_dim + self.normalize = normalize + assert self.new_dim < self.old_dim, "new_dim should be less than old_dim" + + def call(self, input: List[Embedding]) -> List[Embedding]: + output: EmbedderOutput = deepcopy(input) + for embedding in output.data: + old_embedding = embedding.embedding + new_embedding = old_embedding[: self.new_dim] + if self.normalize: + new_embedding = normalize_vector(new_embedding) + embedding.embedding = new_embedding + return output.data + + def _extra_repr(self) -> str: + repr_str = f"old_dim={self.old_dim}, new_dim={self.new_dim}, normalize={self.normalize}" + return repr_str + + +def setup_api_keys(): + # Prompt user to enter their API keys securely + openai_api_key = getpass("Please enter your OpenAI API key: ") + groq_api_key = getpass("Please enter your GROQ API key: ") + + # Set environment variables + os.environ["OPENAI_API_KEY"] = openai_api_key + os.environ["GROQ_API_KEY"] = groq_api_key + print("API keys have been set.") + + +def test_openai_embedder(): + print("\nTesting OpenAI Embedder:") + model_kwargs = { + "model": "text-embedding-3-small", + "dimensions": 256, + "encoding_format": "float", + } + + query = "What is the capital of China?" 
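+    # Duplicate the query 100 times so the embedder is exercised on a
+    # batch input as well as on the single query above.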
+    queries = [query] * 100
+
+    embedder = Embedder(model_client=OpenAIClient(), model_kwargs=model_kwargs)
+
+    # Test single query
+    output = embedder(query)
+    print(
+        f"Single query - Length: {output.length}, Dimension: {output.embedding_dim}, Normalized: {output.is_normalized}"
+    )
+
+    # Test batch queries
+    output = embedder(queries)
+    print(f"Batch queries - Length: {output.length}, Dimension: {output.embedding_dim}")
+
+
+def test_local_embedder():
+    print("\nTesting Local Embedder (HuggingFace):")
+    model_kwargs = {"model": "thenlper/gte-base"}
+    local_embedder = Embedder(
+        model_client=TransformersClient(), model_kwargs=model_kwargs
+    )
+
+    query = "What is the capital of China?"
+    queries = [query] * 100
+
+    # Test single query
+    output = local_embedder(query)
+    print(
+        f"Single query - Length: {output.length}, Dimension: {output.embedding_dim}, Normalized: {output.is_normalized}"
+    )
+
+    # Test batch queries
+    output = local_embedder(queries)
+    print(
+        f"Batch queries - Length: {output.length}, Dimension: {output.embedding_dim}, Normalized: {output.is_normalized}"
+    )
+
+
+def test_custom_embedder():
+    print("\nTesting Custom Embedder with Dimension Reduction:")
+    model_kwargs = {"model": "thenlper/gte-base"}
+    local_embedder_256 = Embedder(
+        model_client=TransformersClient(),
+        model_kwargs=model_kwargs,
+        output_processors=DecreaseEmbeddingDim(768, 256),
+    )
+
+    query = "What is the capital of China?"
+    output = local_embedder_256(query)
+    print(
+        f"Reduced dimension output - Length: {output.length}, Dimension: {output.embedding_dim}, Normalized: {output.is_normalized}"
+    )
+
+
+def test_batch_embedder():
+    print("\nTesting Batch Embedder:")
+    model_kwargs = {"model": "thenlper/gte-base"}
+    local_embedder = Embedder(
+        model_client=TransformersClient(), model_kwargs=model_kwargs
+    )
+    batch_embedder = BatchEmbedder(embedder=local_embedder, batch_size=100)
+
+    query = "What is the capital of China?"
+    queries = [query] * 1000
+
+    print("Starting batch processing...")
+    response = batch_embedder(queries)
+    print(f"Batch processing complete - Total queries processed: {len(queries)}")
+    print(f"Response - Length: {response.length}, Dimension: {response.embedding_dim}")
+
+
+def main():
+    # Setup API keys
+    setup_api_keys()
+
+    # Run all tests
+    test_openai_embedder()
+    test_local_embedder()
+    test_custom_embedder()
+    test_batch_embedder()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/tutorials/multimodal_client_testing_examples.py b/tutorials/multimodal_client_testing_examples.py
new file mode 100644
index 000000000..ee3a171dd
--- /dev/null
+++ b/tutorials/multimodal_client_testing_examples.py
@@ -0,0 +1,121 @@
+"""
+OpenAI Vision and DALL-E Example with Error Testing
+
+To test with different API keys:
+
+1. First run with a valid key:
+   export OPENAI_API_KEY='your_valid_key_here'
+   python tutorials/multimodal_client_testing_examples.py
+
+2. Then test with an invalid key:
+   export OPENAI_API_KEY='abc123'
+   python tutorials/multimodal_client_testing_examples.py
+
+The script will show different GeneratorOutput responses based on the API key status.
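+
+A minimal usage sketch of the vision path (not one of the tests below; the
+image URL here is a placeholder):
+
+    from adalflow.core import Generator
+    from adalflow.components.model_client.openai_client import OpenAIClient
+
+    gen = Generator(
+        model_client=OpenAIClient(),
+        model_kwargs={"model": "gpt-4o-mini", "images": "https://example.com/photo.jpg"},
+    )
+    print(gen({"input_str": "What do you see in this image?"}))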
+""" + +from adalflow.core import Generator +from adalflow.components.model_client.openai_client import OpenAIClient +from adalflow.core.types import ModelType + + +class ImageGenerator(Generator): + """Generator subclass for image generation.""" + + model_type = ModelType.IMAGE_GENERATION + + +def test_basic_generation(): + """Test basic text generation""" + client = OpenAIClient() + gen = Generator( + model_client=client, model_kwargs={"model": "gpt-4o-mini", "max_tokens": 100} + ) + + print("\n=== Testing Basic Generation ===") + response = gen({"input_str": "Hello, world!"}) + print(f"Response: {response}") + + +def test_invalid_image_url(): + """Test Generator output with invalid image URL""" + client = OpenAIClient() + gen = Generator( + model_client=client, + model_kwargs={ + "model": "gpt-4o-mini", + "images": "https://invalid.url/nonexistent.jpg", + "max_tokens": 300, + }, + ) + + print("\n=== Testing Invalid Image URL ===") + response = gen({"input_str": "What do you see in this image?"}) + print(f"Response with invalid image URL: {response}") + + +def test_invalid_image_generation(): + """Test DALL-E generation with invalid parameters""" + client = OpenAIClient() + gen = ImageGenerator( + model_client=client, + model_kwargs={ + "model": "dall-e-3", + "size": "invalid_size", # Invalid size parameter + "quality": "standard", + "n": 1, + }, + ) + + print("\n=== Testing Invalid DALL-E Parameters ===") + response = gen({"input_str": "A cat"}) + print(f"Response with invalid DALL-E parameters: {response}") + + +def test_vision_and_generation(): + """Test both vision analysis and image generation""" + client = OpenAIClient() + + # 1. Test Vision Analysis + vision_gen = Generator( + model_client=client, + model_kwargs={ + "model": "gpt-4o-mini", + "images": "https://upload.wikimedia.org/wikipedia/en/7/7d/Lenna_%28test_image%29.png", + "max_tokens": 300, + }, + ) + + vision_response = vision_gen( + {"input_str": "What do you see in this image? Be detailed but concise."} + ) + print("\n=== Vision Analysis ===") + print(f"Description: {vision_response.raw_response}") + + # 2. 
Test DALL-E Image Generation
+    dalle_gen = ImageGenerator(
+        model_client=client,
+        model_kwargs={
+            "model": "dall-e-3",
+            "size": "1024x1024",
+            "quality": "standard",
+            "n": 1,
+        },
+    )
+
+    # For image generation, input_str becomes the prompt
+    response = dalle_gen(
+        {"input_str": "A happy siamese cat playing with a red ball of yarn"}
+    )
+    print("\n=== DALL-E Generation ===")
+    print(f"Generated Image URL: {response.data}")
+
+
+if __name__ == "__main__":
+    print("Starting OpenAI Vision and DALL-E test...\n")
+
+    # Run all tests - they will show errors if API key is invalid/empty
+    test_basic_generation()
+    test_invalid_image_url()
+    test_invalid_image_generation()
+    test_vision_and_generation()
diff --git a/tutorials/rag/config.py b/tutorials/rag/config.py
new file mode 100644
index 000000000..2a6d383f8
--- /dev/null
+++ b/tutorials/rag/config.py
@@ -0,0 +1,23 @@
+configs = {
+    "embedder": {
+        "batch_size": 100,
+        "model_kwargs": {
+            "model": "text-embedding-3-small",
+            "dimensions": 256,
+            "encoding_format": "float",
+        },
+    },
+    "retriever": {
+        "top_k": 2,
+    },
+    "generator": {
+        "model": "gpt-3.5-turbo",
+        "temperature": 0.3,
+        "stream": False,
+    },
+    "text_splitter": {
+        "split_by": "word",
+        "chunk_size": 400,
+        "chunk_overlap": 200,
+    },
+}
diff --git a/tutorials/rag/rag.py b/tutorials/rag/rag.py
new file mode 100644
index 000000000..ab2488792
--- /dev/null
+++ b/tutorials/rag/rag.py
@@ -0,0 +1,105 @@
+from typing import Optional, Any, List
+
+import adalflow as adal
+from adalflow.core.db import LocalDB
+
+from adalflow.core.types import ModelClientType
+
+from adalflow.core.string_parser import JsonParser
+from adalflow.components.retriever.faiss_retriever import FAISSRetriever
+from adalflow.components.data_process import (
+    RetrieverOutputToContextStr,
+    ToEmbeddings,
+    TextSplitter,
+)
+
+from adalflow.components.model_client import OpenAIClient
+
+from tutorials.rag.config import configs
+
+
+def prepare_data_pipeline():
+    splitter = TextSplitter(**configs["text_splitter"])
+    embedder = adal.Embedder(
+        model_client=ModelClientType.OPENAI(),
+        model_kwargs=configs["embedder"]["model_kwargs"],
+    )
+    embedder_transformer = ToEmbeddings(
+        embedder=embedder, batch_size=configs["embedder"]["batch_size"]
+    )
+    data_transformer = adal.Sequential(
+        splitter, embedder_transformer
+    )  # sequential will chain together splitter and embedder
+    return data_transformer
+
+
+rag_prompt_task_desc = r"""
+You are a helpful assistant.
+
+Your task is to answer the query that may or may not come with context information.
+When context is provided, you should stick to the context and rely less on your prior knowledge to answer the query.
+ +Output JSON format: +{ + "answer": "The answer to the query", +}""" + + +class RAG(adal.Component): + + def __init__(self, index_path: str = "index.faiss"): + super().__init__() + + self.db = LocalDB.load_state(index_path) + + self.transformed_docs: List[adal.Document] = self.db.get_transformed_data( + "data_transformer" + ) + embedder = adal.Embedder( + model_client=ModelClientType.OPENAI(), + model_kwargs=configs["embedder"]["model_kwargs"], + ) + # map the documents to embeddings + self.retriever = FAISSRetriever( + **configs["retriever"], + embedder=embedder, + documents=self.transformed_docs, + document_map_func=lambda doc: doc.vector, + ) + self.retriever_output_processors = RetrieverOutputToContextStr(deduplicate=True) + + self.generator = adal.Generator( + prompt_kwargs={ + "task_desc_str": rag_prompt_task_desc, + }, + model_client=OpenAIClient(), + model_kwargs=configs["generator"], + output_processors=JsonParser(), + ) + + def generate(self, query: str, context: Optional[str] = None) -> Any: + if not self.generator: + raise ValueError("Generator is not set") + + prompt_kwargs = { + "context_str": context, + "input_str": query, + } + response = self.generator(prompt_kwargs=prompt_kwargs) + return response + + def call(self, query: str) -> Any: + retrieved_documents = self.retriever(query) + # fill in the document + for i, retriever_output in enumerate(retrieved_documents): + retrieved_documents[i].documents = [ + self.transformed_docs[doc_index] + for doc_index in retriever_output.doc_indices + ] + + print(f"retrieved_documents: \n {retrieved_documents}\n") + context_str = self.retriever_output_processors(retrieved_documents) + + print(f"context_str: \n {context_str}\n") + + return self.generate(query, context=context_str), retrieved_documents diff --git a/use_cases/config.py b/use_cases/config.py index 6eb5b3ed9..895ed097c 100644 --- a/use_cases/config.py +++ b/use_cases/config.py @@ -41,7 +41,7 @@ gpt_4o_model = { "model_client": OpenAIClient(), "model_kwargs": { - "model": "gpt-4o", + "model": "gpt-4o-mini", "temperature": 1, "top_p": 0.99, "max_tokens": 1000, diff --git a/use_cases/question_answering/bbh/object_count/task.py b/use_cases/question_answering/bbh/object_count/task.py index 5aebb47b2..6f5571f85 100644 --- a/use_cases/question_answering/bbh/object_count/task.py +++ b/use_cases/question_answering/bbh/object_count/task.py @@ -40,7 +40,7 @@ def __init__(self, model_client: adal.ModelClient, model_kwargs: Dict): few_shot_demos = adal.Parameter( data=None, role_desc="To provide few shot demos to the language model", - requires_opt=True, + requires_opt=False, param_type=ParameterType.DEMOS, ) @@ -60,6 +60,14 @@ def call( self, question: str, id: str = None ) -> Union[adal.GeneratorOutput, adal.Parameter]: output = self.llm_counter(prompt_kwargs={"input_str": question}, id=id) + print(f"output: {output}, training: {self.training}") + if self.training: + if output.full_response.error and "429" in output.full_response.error: + raise ValueError("Rate limit exceeded") + else: + if output.error and "429" in output.error: + print("rate limit exceeded:") + raise ValueError("Rate limit exceeded") return output diff --git a/use_cases/question_answering/bbh/object_count/train_new.py b/use_cases/question_answering/bbh/object_count/train_new.py index 280f7c1a6..48309aa71 100644 --- a/use_cases/question_answering/bbh/object_count/train_new.py +++ b/use_cases/question_answering/bbh/object_count/train_new.py @@ -130,22 +130,36 @@ def train( print(trainer) train_dataset, val_dataset, 
test_dataset = load_datasets() - trainer.fit( + ckpt, _ = trainer.fit( train_dataset=train_dataset, val_dataset=val_dataset, test_dataset=test_dataset, resume_from_ckpt=resume_from_ckpt, ) + return ckpt if __name__ == "__main__": + import sys + import json - train( - debug=True, + ckpt = train( + debug=False, max_steps=12, strategy="constrained", exclude_input_fields_from_bootstrap_demos=True, ) + print(f"ckpt: {ckpt}") + # Save ckpt to a file passed as an argument + if len(sys.argv) > 1: # Check if a file path is provided + with open(sys.argv[1], "w") as f: + json.dump({"ckpt": ckpt}, f) # train_diagnose(**gpt_3_model) - # train_diagnose_teacher(**gpt_4o_model) + # train_diagnose_teacher(**gpt_4o_model) # 4omini works well as an optimizer too + # /Users/liyin/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_49c63_run_1.json + # 0.72 -> 0.9 val + # 0.79 -> 0.92 test + # 0.86->0.94 val, 0.79 -> 0.93 with only negative gradients /Users/liyin/.adalflow/ckpt/ObjectCountAdalComponent/constrained_max_steps_12_7a649_run_1.json + + # without gradients -> 0.9 on tests diff --git a/use_cases/question_answering/bbh/word_sorting/diagnose.py b/use_cases/question_answering/bbh/word_sorting/diagnose.py index 2faa9e763..af2243ef5 100644 --- a/use_cases/question_answering/bbh/word_sorting/diagnose.py +++ b/use_cases/question_answering/bbh/word_sorting/diagnose.py @@ -38,34 +38,34 @@ def __init__( # eval_fn = lambda question, gt_answer, pred_answer: 1 super().__init__(task=task, eval_fn=eval_fn) - def handle_one_task_sample(self, sample: Example): + def prepare_task(self, sample: Example): return self.task.call, {"question": sample.question, "id": sample.id} - def evaluate_one_sample( - self, sample: Example, y_pred: adal.GeneratorOutput - ) -> float: + def prepare_eval(self, sample: Example, y_pred: adal.GeneratorOutput) -> float: y_label = "" if ( y_pred is not None and y_pred.data is not None ): # if y_pred and y_pred.data: might introduce bug when the data is 0 y_label = y_pred.data - return self.eval_fn( - question=sample.question, gt_answer=sample.answer, pred_answer=y_label - ) + return self.eval_fn, { + "question": sample.question, + "gt_answer": sample.answer, + "pred_answer": y_label, + } def evaluate_one_sample(): - trainset, valset, testset = load_datasets(task_name="BBH_word_sorting") + trainset, valset, testset = load_datasets(task_name="word_sorting") adal_component = WordSortingAdalComponent( **gpt_3_model, llm_judge_model_config=gpt_3_model ) example = trainset[1] - call, kwargs = adal_component.handle_one_task_sample(example) + call, kwargs = adal_component.prepare_task(example) output = call(**kwargs) print(f"output: {output}") print(f"trainset[0]: {example}") - score = adal_component.evaluate_one_sample(example, output) + score = adal_component.prepare_eval(example, output) print(score) @@ -74,7 +74,7 @@ def diagnose( model_kwargs: Dict, ) -> Dict: - trainset, valset, testset = load_datasets(task_name="BBH_word_sorting") + trainset, valset, testset = load_datasets(task_name="word_sorting") adal_component = WordSortingAdalComponent( model_client, model_kwargs, llm_judge_model_config=gpt_3_model ) @@ -91,3 +91,4 @@ def diagnose( # evaluate_one_sample() diagnose(**gpt_3_model) + # 0.88 train, 0.84 test, 0.72 val diff --git a/use_cases/question_answering/bbh/word_sorting/task.py b/use_cases/question_answering/bbh/word_sorting/task.py index ace7d1e62..0d1d487e2 100644 --- a/use_cases/question_answering/bbh/word_sorting/task.py +++ 
b/use_cases/question_answering/bbh/word_sorting/task.py @@ -69,9 +69,7 @@ def test_word_sorting_task(): task_pipeline = QuestionAnswerTaskPipeline(**gpt_3_model) print(task_pipeline) - train_dataset, val_dataset, test_dataset = load_datasets( - task_name="BBH_word_sorting" - ) + train_dataset, val_dataset, test_dataset = load_datasets(task_name="word_sorting") example = train_dataset[0] question = example.question diff --git a/use_cases/question_answering/bbh/word_sorting/train.py b/use_cases/question_answering/bbh/word_sorting/train.py index 4d1af9e3e..125182062 100644 --- a/use_cases/question_answering/bbh/word_sorting/train.py +++ b/use_cases/question_answering/bbh/word_sorting/train.py @@ -52,23 +52,23 @@ def __init__( text_optimizer_model_config=text_optimizer_model_config, ) - def handle_one_task_sample(self, sample: Example): + def prepare_task(self, sample: Example): return self.task.call, {"question": sample.question, "id": sample.id} - def evaluate_one_sample( - self, sample: Example, y_pred: adal.GeneratorOutput - ) -> float: + def prepare_eval(self, sample: Example, y_pred: adal.GeneratorOutput) -> float: y_label = "" if ( y_pred is not None and y_pred.data is not None ): # if y_pred and y_pred.data: might introduce bug when the data is 0 y_label = y_pred.data - return self.eval_fn( - question=sample.question, gt_answer=sample.answer, pred_answer=y_label - ) + return self.eval_fn, { + "question": sample.question, + "gt_answer": sample.answer, + "pred_answer": y_label, + } - def handle_one_loss_sample(self, sample: Example, pred: adal.Parameter): + def prepare_loss(self, sample: Example, pred: adal.Parameter): # prepare gt parameter y_gt = adal.Parameter( name="y_gt", @@ -92,19 +92,6 @@ def handle_one_loss_sample(self, sample: Example, pred: adal.Parameter): } } - # def configure_backward_engine(self): - # super().configure_backward_engine_helper(**self.backward_engine_model_config) - - # def configure_teacher_generator(self): - # super().configure_teacher_generator_helper(**self.teacher_model_config) - - # def configure_optimizers( - # self, - # ): # TODO: train the text optimizer and the demo optimizer at the same time - # to = super().configure_text_optimizer_helper(**self.text_optimizer_model_config) - # do = super().configure_demo_optimizer_helper() - # return to + do - def train( train_batch_size=4, # larger batch size is not that effective, probably because of llm's lost in the middle @@ -141,9 +128,7 @@ def train( ) print(trainer) - train_dataset, val_dataset, test_dataset = load_datasets( - task_name="BBH_word_sorting" - ) + train_dataset, val_dataset, test_dataset = load_datasets(task_name="word_sorting") for dataset in [train_dataset, val_dataset, test_dataset]: for example in dataset: example.question = example.question.replace( diff --git a/use_cases/question_answering/bbh/word_sorting/train_paper.py b/use_cases/question_answering/bbh/word_sorting/train_paper.py index 00a848304..2c4b0e157 100644 --- a/use_cases/question_answering/bbh/word_sorting/train_paper.py +++ b/use_cases/question_answering/bbh/word_sorting/train_paper.py @@ -42,7 +42,6 @@ def __init__( eval_fn=eval_fn, eval_fn_desc="exact_match: 1 if str(y) == str(y_gt) else 0", ) - # eval_fn = lambda question, gt_answer, pred_answer: 1 super().__init__( task=task, eval_fn=eval_fn, @@ -52,23 +51,23 @@ def __init__( text_optimizer_model_config=text_optimizer_model_config, ) - def handle_one_task_sample(self, sample: Example): + def prepare_task(self, sample: Example): return self.task.call, {"question": 
sample.question, "id": sample.id}

-    def evaluate_one_sample(
-        self, sample: Example, y_pred: adal.GeneratorOutput
-    ) -> float:
+    def prepare_eval(self, sample: Example, y_pred: adal.GeneratorOutput) -> float:
         y_label = ""
         if (
             y_pred is not None and y_pred.data is not None
         ):  # if y_pred and y_pred.data: might introduce bug when the data is 0
             y_label = y_pred.data
-        return self.eval_fn(
-            question=sample.question, gt_answer=sample.answer, pred_answer=y_label
-        )
+        return self.eval_fn, {
+            "question": sample.question,
+            "gt_answer": sample.answer,
+            "pred_answer": y_label,
+        }

-    def handle_one_loss_sample(self, sample: Example, pred: adal.Parameter):
+    def prepare_loss(self, sample: Example, pred: adal.Parameter):
         # prepare gt parameter
         y_gt = adal.Parameter(
             name="y_gt",
@@ -92,19 +91,6 @@ def handle_one_loss_sample(self, sample: Example, pred: adal.Parameter):
             }
         }

-    # def configure_backward_engine(self):
-    #     super().configure_backward_engine_helper(**self.backward_engine_model_config)
-
-    # def configure_teacher_generator(self):
-    #     super().configure_teacher_generator_helper(**self.teacher_model_config)
-
-    # def configure_optimizers(
-    #     self,
-    # ):  # TODO: train the text optimizer and the demo optimizer at the same time
-    #     to = super().configure_text_optimizer_helper(**self.text_optimizer_model_config)
-    #     do = super().configure_demo_optimizer_helper()
-    #     return to + do
-
 def train(
     train_batch_size=4,  # larger batch size is not that effective, probably because of llm's lost in the middle
@@ -141,9 +127,7 @@ def train(
     )
     print(trainer)

-    train_dataset, val_dataset, test_dataset = load_datasets(
-        task_name="BBH_word_sorting"
-    )
+    train_dataset, val_dataset, test_dataset = load_datasets(task_name="word_sorting")
     trainer.fit(
         train_dataset=train_dataset,
         val_dataset=val_dataset,
diff --git a/use_cases/text_grad_2.0_train.py b/use_cases/text_grad_2.0_train.py
new file mode 100644
index 000000000..37ff320d3
--- /dev/null
+++ b/use_cases/text_grad_2.0_train.py
@@ -0,0 +1,57 @@
+import subprocess
+import tempfile
+import json
+
+# List of experiments to run
+object_count = "use_cases/question_answering/bbh/object_count/train_new.py"
+hotpot_qa_multi_hop_rag = "benchmarks/hotpot_qa/adal_exp/train_multi_hop_rag.py"
+
+experiments = [
+    object_count,
+    # hotpot_qa_multi_hop_rag,
+]
+
+# Optional: Arguments for each experiment (if needed)
+experiment_args = {
+    object_count: "",
+    # hotpot_qa_multi_hop_rag: "",
+}
+ckpt_values = {}
+
+
+def run_experiment(script, args):
+    try:
+        # Use a temporary file to store the ckpt
+        with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as temp_file:
+            temp_path = temp_file.name
+
+        print(f"Running {script} with args: {args}")
+        subprocess.run(
+            f"python {script} {temp_path} {args}",
+            check=True,
+            shell=True,
+            text=True,
+        )
+
+        # Read the ckpt value from the temporary file
+        with open(temp_path, "r") as f:
+            data = json.load(f)
+            ckpt = data.get("ckpt")
+            print(f"Checkpoint from {script}: {ckpt}")
+            return ckpt
+
+    except subprocess.CalledProcessError as e:
+        print(f"Experiment {script} failed with error: {e}")
+        return None
+
+
+if __name__ == "__main__":
+    for experiment in experiments:
+        args = experiment_args.get(experiment, "")
+        ckpt = run_experiment(experiment, args)
+        if ckpt:
+            ckpt_values[experiment] = ckpt
+
+    print("\nAll Checkpoints:")
+    for experiment, ckpt in ckpt_values.items():
+        print(f"{experiment}: {ckpt}")
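The runner above relies on an implicit contract with each experiment script: the script accepts an optional output path as its first CLI argument and, when one is given, writes its checkpoint there as JSON under a "ckpt" key, as train_new.py now does. A minimal sketch of a new experiment satisfying this contract (the file name and the train() body are hypothetical placeholders, not part of this diff):

# use_cases/my_experiment.py -- hypothetical example, not part of this diff
import sys
import json


def train() -> str:
    # Placeholder: run the real training loop and return the checkpoint path.
    return "/path/to/my_experiment_ckpt.json"


if __name__ == "__main__":
    ckpt = train()
    if len(sys.argv) > 1:  # the runner passes a temp-file path as the first argument
        with open(sys.argv[1], "w") as f:
            json.dump({"ckpt": ckpt}, f)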